id
stringlengths
25
30
content
stringlengths
14
942k
max_stars_repo_path
stringlengths
49
55
crossvul-cpp_data_bad_2080_0
/* * Implementation of the security services. * * Authors : Stephen Smalley, <sds@epoch.ncsc.mil> * James Morris <jmorris@redhat.com> * * Updated: Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com> * * Support for enhanced MLS infrastructure. * Support for context based audit filters. * * Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com> * * Added conditional policy language extensions * * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support for NetLabel * Added support for the policy capability bitmap * * Updated: Chad Sellers <csellers@tresys.com> * * Added validation of kernel classes and permissions * * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com> * * Added support for bounds domain and audit messaged on masked permissions * * Updated: Guido Trentalancia <guido@trentalancia.com> * * Added support for runtime switching of the policy type * * Copyright (C) 2008, 2009 NEC Corporation * Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P. * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc. * Copyright (C) 2003 - 2004, 2006 Tresys Technology, LLC * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com> * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2. 
*/ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/sched.h> #include <linux/audit.h> #include <linux/mutex.h> #include <linux/selinux.h> #include <linux/flex_array.h> #include <linux/vmalloc.h> #include <net/netlabel.h> #include "flask.h" #include "avc.h" #include "avc_ss.h" #include "security.h" #include "context.h" #include "policydb.h" #include "sidtab.h" #include "services.h" #include "conditional.h" #include "mls.h" #include "objsec.h" #include "netlabel.h" #include "xfrm.h" #include "ebitmap.h" #include "audit.h" int selinux_policycap_netpeer; int selinux_policycap_openperm; int selinux_policycap_alwaysnetwork; static DEFINE_RWLOCK(policy_rwlock); static struct sidtab sidtab; struct policydb policydb; int ss_initialized; /* * The largest sequence number that has been used when * providing an access decision to the access vector cache. * The sequence number only changes when a policy change * occurs. */ static u32 latest_granting; /* Forward declaration. 
*/ static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len); static void context_struct_compute_av(struct context *scontext, struct context *tcontext, u16 tclass, struct av_decision *avd); struct selinux_mapping { u16 value; /* policy value */ unsigned num_perms; u32 perms[sizeof(u32) * 8]; }; static struct selinux_mapping *current_mapping; static u16 current_mapping_size; static int selinux_set_mapping(struct policydb *pol, struct security_class_mapping *map, struct selinux_mapping **out_map_p, u16 *out_map_size) { struct selinux_mapping *out_map = NULL; size_t size = sizeof(struct selinux_mapping); u16 i, j; unsigned k; bool print_unknown_handle = false; /* Find number of classes in the input mapping */ if (!map) return -EINVAL; i = 0; while (map[i].name) i++; /* Allocate space for the class records, plus one for class zero */ out_map = kcalloc(++i, size, GFP_ATOMIC); if (!out_map) return -ENOMEM; /* Store the raw class and permission values */ j = 0; while (map[j].name) { struct security_class_mapping *p_in = map + (j++); struct selinux_mapping *p_out = out_map + j; /* An empty class string skips ahead */ if (!strcmp(p_in->name, "")) { p_out->num_perms = 0; continue; } p_out->value = string_to_security_class(pol, p_in->name); if (!p_out->value) { printk(KERN_INFO "SELinux: Class %s not defined in policy.\n", p_in->name); if (pol->reject_unknown) goto err; p_out->num_perms = 0; print_unknown_handle = true; continue; } k = 0; while (p_in->perms && p_in->perms[k]) { /* An empty permission string skips ahead */ if (!*p_in->perms[k]) { k++; continue; } p_out->perms[k] = string_to_av_perm(pol, p_out->value, p_in->perms[k]); if (!p_out->perms[k]) { printk(KERN_INFO "SELinux: Permission %s in class %s not defined in policy.\n", p_in->perms[k], p_in->name); if (pol->reject_unknown) goto err; print_unknown_handle = true; } k++; } p_out->num_perms = k; } if (print_unknown_handle) printk(KERN_INFO "SELinux: the above unknown classes 
and permissions will be %s\n", pol->allow_unknown ? "allowed" : "denied"); *out_map_p = out_map; *out_map_size = i; return 0; err: kfree(out_map); return -EINVAL; } /* * Get real, policy values from mapped values */ static u16 unmap_class(u16 tclass) { if (tclass < current_mapping_size) return current_mapping[tclass].value; return tclass; } /* * Get kernel value for class from its policy value */ static u16 map_class(u16 pol_value) { u16 i; for (i = 1; i < current_mapping_size; i++) { if (current_mapping[i].value == pol_value) return i; } return SECCLASS_NULL; } static void map_decision(u16 tclass, struct av_decision *avd, int allow_unknown) { if (tclass < current_mapping_size) { unsigned i, n = current_mapping[tclass].num_perms; u32 result; for (i = 0, result = 0; i < n; i++) { if (avd->allowed & current_mapping[tclass].perms[i]) result |= 1<<i; if (allow_unknown && !current_mapping[tclass].perms[i]) result |= 1<<i; } avd->allowed = result; for (i = 0, result = 0; i < n; i++) if (avd->auditallow & current_mapping[tclass].perms[i]) result |= 1<<i; avd->auditallow = result; for (i = 0, result = 0; i < n; i++) { if (avd->auditdeny & current_mapping[tclass].perms[i]) result |= 1<<i; if (!allow_unknown && !current_mapping[tclass].perms[i]) result |= 1<<i; } /* * In case the kernel has a bug and requests a permission * between num_perms and the maximum permission number, we * should audit that denial */ for (; i < (sizeof(u32)*8); i++) result |= 1<<i; avd->auditdeny = result; } } int security_mls_enabled(void) { return policydb.mls_enabled; } /* * Return the boolean value of a constraint expression * when it is applied to the specified source and target * security contexts. * * xcontext is a special beast... It is used by the validatetrans rules * only. For these rules, scontext is the context before the transition, * tcontext is the context after the transition, and xcontext is the context * of the process performing the transition. 
All other callers of * constraint_expr_eval should pass in NULL for xcontext. */ static int constraint_expr_eval(struct context *scontext, struct context *tcontext, struct context *xcontext, struct constraint_expr *cexpr) { u32 val1, val2; struct context *c; struct role_datum *r1, *r2; struct mls_level *l1, *l2; struct constraint_expr *e; int s[CEXPR_MAXDEPTH]; int sp = -1; for (e = cexpr; e; e = e->next) { switch (e->expr_type) { case CEXPR_NOT: BUG_ON(sp < 0); s[sp] = !s[sp]; break; case CEXPR_AND: BUG_ON(sp < 1); sp--; s[sp] &= s[sp + 1]; break; case CEXPR_OR: BUG_ON(sp < 1); sp--; s[sp] |= s[sp + 1]; break; case CEXPR_ATTR: if (sp == (CEXPR_MAXDEPTH - 1)) return 0; switch (e->attr) { case CEXPR_USER: val1 = scontext->user; val2 = tcontext->user; break; case CEXPR_TYPE: val1 = scontext->type; val2 = tcontext->type; break; case CEXPR_ROLE: val1 = scontext->role; val2 = tcontext->role; r1 = policydb.role_val_to_struct[val1 - 1]; r2 = policydb.role_val_to_struct[val2 - 1]; switch (e->op) { case CEXPR_DOM: s[++sp] = ebitmap_get_bit(&r1->dominates, val2 - 1); continue; case CEXPR_DOMBY: s[++sp] = ebitmap_get_bit(&r2->dominates, val1 - 1); continue; case CEXPR_INCOMP: s[++sp] = (!ebitmap_get_bit(&r1->dominates, val2 - 1) && !ebitmap_get_bit(&r2->dominates, val1 - 1)); continue; default: break; } break; case CEXPR_L1L2: l1 = &(scontext->range.level[0]); l2 = &(tcontext->range.level[0]); goto mls_ops; case CEXPR_L1H2: l1 = &(scontext->range.level[0]); l2 = &(tcontext->range.level[1]); goto mls_ops; case CEXPR_H1L2: l1 = &(scontext->range.level[1]); l2 = &(tcontext->range.level[0]); goto mls_ops; case CEXPR_H1H2: l1 = &(scontext->range.level[1]); l2 = &(tcontext->range.level[1]); goto mls_ops; case CEXPR_L1H1: l1 = &(scontext->range.level[0]); l2 = &(scontext->range.level[1]); goto mls_ops; case CEXPR_L2H2: l1 = &(tcontext->range.level[0]); l2 = &(tcontext->range.level[1]); goto mls_ops; mls_ops: switch (e->op) { case CEXPR_EQ: s[++sp] = mls_level_eq(l1, l2); continue; 
case CEXPR_NEQ: s[++sp] = !mls_level_eq(l1, l2); continue; case CEXPR_DOM: s[++sp] = mls_level_dom(l1, l2); continue; case CEXPR_DOMBY: s[++sp] = mls_level_dom(l2, l1); continue; case CEXPR_INCOMP: s[++sp] = mls_level_incomp(l2, l1); continue; default: BUG(); return 0; } break; default: BUG(); return 0; } switch (e->op) { case CEXPR_EQ: s[++sp] = (val1 == val2); break; case CEXPR_NEQ: s[++sp] = (val1 != val2); break; default: BUG(); return 0; } break; case CEXPR_NAMES: if (sp == (CEXPR_MAXDEPTH-1)) return 0; c = scontext; if (e->attr & CEXPR_TARGET) c = tcontext; else if (e->attr & CEXPR_XTARGET) { c = xcontext; if (!c) { BUG(); return 0; } } if (e->attr & CEXPR_USER) val1 = c->user; else if (e->attr & CEXPR_ROLE) val1 = c->role; else if (e->attr & CEXPR_TYPE) val1 = c->type; else { BUG(); return 0; } switch (e->op) { case CEXPR_EQ: s[++sp] = ebitmap_get_bit(&e->names, val1 - 1); break; case CEXPR_NEQ: s[++sp] = !ebitmap_get_bit(&e->names, val1 - 1); break; default: BUG(); return 0; } break; default: BUG(); return 0; } } BUG_ON(sp != 0); return s[0]; } /* * security_dump_masked_av - dumps masked permissions during * security_compute_av due to RBAC, MLS/Constraint and Type bounds. 
*/ static int dump_masked_av_helper(void *k, void *d, void *args) { struct perm_datum *pdatum = d; char **permission_names = args; BUG_ON(pdatum->value < 1 || pdatum->value > 32); permission_names[pdatum->value - 1] = (char *)k; return 0; } static void security_dump_masked_av(struct context *scontext, struct context *tcontext, u16 tclass, u32 permissions, const char *reason) { struct common_datum *common_dat; struct class_datum *tclass_dat; struct audit_buffer *ab; char *tclass_name; char *scontext_name = NULL; char *tcontext_name = NULL; char *permission_names[32]; int index; u32 length; bool need_comma = false; if (!permissions) return; tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1); tclass_dat = policydb.class_val_to_struct[tclass - 1]; common_dat = tclass_dat->comdatum; /* init permission_names */ if (common_dat && hashtab_map(common_dat->permissions.table, dump_masked_av_helper, permission_names) < 0) goto out; if (hashtab_map(tclass_dat->permissions.table, dump_masked_av_helper, permission_names) < 0) goto out; /* get scontext/tcontext in text form */ if (context_struct_to_string(scontext, &scontext_name, &length) < 0) goto out; if (context_struct_to_string(tcontext, &tcontext_name, &length) < 0) goto out; /* audit a message */ ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR); if (!ab) goto out; audit_log_format(ab, "op=security_compute_av reason=%s " "scontext=%s tcontext=%s tclass=%s perms=", reason, scontext_name, tcontext_name, tclass_name); for (index = 0; index < 32; index++) { u32 mask = (1 << index); if ((mask & permissions) == 0) continue; audit_log_format(ab, "%s%s", need_comma ? "," : "", permission_names[index] ? permission_names[index] : "????"); need_comma = true; } audit_log_end(ab); out: /* release scontext/tcontext */ kfree(tcontext_name); kfree(scontext_name); return; } /* * security_boundary_permission - drops violated permissions * on boundary constraint. 
*/ static void type_attribute_bounds_av(struct context *scontext, struct context *tcontext, u16 tclass, struct av_decision *avd) { struct context lo_scontext; struct context lo_tcontext; struct av_decision lo_avd; struct type_datum *source; struct type_datum *target; u32 masked = 0; source = flex_array_get_ptr(policydb.type_val_to_struct_array, scontext->type - 1); BUG_ON(!source); target = flex_array_get_ptr(policydb.type_val_to_struct_array, tcontext->type - 1); BUG_ON(!target); if (source->bounds) { memset(&lo_avd, 0, sizeof(lo_avd)); memcpy(&lo_scontext, scontext, sizeof(lo_scontext)); lo_scontext.type = source->bounds; context_struct_compute_av(&lo_scontext, tcontext, tclass, &lo_avd); if ((lo_avd.allowed & avd->allowed) == avd->allowed) return; /* no masked permission */ masked = ~lo_avd.allowed & avd->allowed; } if (target->bounds) { memset(&lo_avd, 0, sizeof(lo_avd)); memcpy(&lo_tcontext, tcontext, sizeof(lo_tcontext)); lo_tcontext.type = target->bounds; context_struct_compute_av(scontext, &lo_tcontext, tclass, &lo_avd); if ((lo_avd.allowed & avd->allowed) == avd->allowed) return; /* no masked permission */ masked = ~lo_avd.allowed & avd->allowed; } if (source->bounds && target->bounds) { memset(&lo_avd, 0, sizeof(lo_avd)); /* * lo_scontext and lo_tcontext are already * set up. */ context_struct_compute_av(&lo_scontext, &lo_tcontext, tclass, &lo_avd); if ((lo_avd.allowed & avd->allowed) == avd->allowed) return; /* no masked permission */ masked = ~lo_avd.allowed & avd->allowed; } if (masked) { /* mask violated permissions */ avd->allowed &= ~masked; /* audit masked permissions */ security_dump_masked_av(scontext, tcontext, tclass, masked, "bounds"); } } /* * Compute access vectors based on a context structure pair for * the permissions in a particular class. 
*/ static void context_struct_compute_av(struct context *scontext, struct context *tcontext, u16 tclass, struct av_decision *avd) { struct constraint_node *constraint; struct role_allow *ra; struct avtab_key avkey; struct avtab_node *node; struct class_datum *tclass_datum; struct ebitmap *sattr, *tattr; struct ebitmap_node *snode, *tnode; unsigned int i, j; avd->allowed = 0; avd->auditallow = 0; avd->auditdeny = 0xffffffff; if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) { if (printk_ratelimit()) printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass); return; } tclass_datum = policydb.class_val_to_struct[tclass - 1]; /* * If a specific type enforcement rule was defined for * this permission check, then use it. */ avkey.target_class = tclass; avkey.specified = AVTAB_AV; sattr = flex_array_get(policydb.type_attr_map_array, scontext->type - 1); BUG_ON(!sattr); tattr = flex_array_get(policydb.type_attr_map_array, tcontext->type - 1); BUG_ON(!tattr); ebitmap_for_each_positive_bit(sattr, snode, i) { ebitmap_for_each_positive_bit(tattr, tnode, j) { avkey.source_type = i + 1; avkey.target_type = j + 1; for (node = avtab_search_node(&policydb.te_avtab, &avkey); node; node = avtab_search_node_next(node, avkey.specified)) { if (node->key.specified == AVTAB_ALLOWED) avd->allowed |= node->datum.data; else if (node->key.specified == AVTAB_AUDITALLOW) avd->auditallow |= node->datum.data; else if (node->key.specified == AVTAB_AUDITDENY) avd->auditdeny &= node->datum.data; } /* Check conditional av table for additional permissions */ cond_compute_av(&policydb.te_cond_avtab, &avkey, avd); } } /* * Remove any permissions prohibited by a constraint (this includes * the MLS policy). 
*/ constraint = tclass_datum->constraints; while (constraint) { if ((constraint->permissions & (avd->allowed)) && !constraint_expr_eval(scontext, tcontext, NULL, constraint->expr)) { avd->allowed &= ~(constraint->permissions); } constraint = constraint->next; } /* * If checking process transition permission and the * role is changing, then check the (current_role, new_role) * pair. */ if (tclass == policydb.process_class && (avd->allowed & policydb.process_trans_perms) && scontext->role != tcontext->role) { for (ra = policydb.role_allow; ra; ra = ra->next) { if (scontext->role == ra->role && tcontext->role == ra->new_role) break; } if (!ra) avd->allowed &= ~policydb.process_trans_perms; } /* * If the given source and target types have boundary * constraint, lazy checks have to mask any violated * permission and notice it to userspace via audit. */ type_attribute_bounds_av(scontext, tcontext, tclass, avd); } static int security_validtrans_handle_fail(struct context *ocontext, struct context *ncontext, struct context *tcontext, u16 tclass) { char *o = NULL, *n = NULL, *t = NULL; u32 olen, nlen, tlen; if (context_struct_to_string(ocontext, &o, &olen)) goto out; if (context_struct_to_string(ncontext, &n, &nlen)) goto out; if (context_struct_to_string(tcontext, &t, &tlen)) goto out; audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, "security_validate_transition: denied for" " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s", o, n, t, sym_name(&policydb, SYM_CLASSES, tclass-1)); out: kfree(o); kfree(n); kfree(t); if (!selinux_enforcing) return 0; return -EPERM; } int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid, u16 orig_tclass) { struct context *ocontext; struct context *ncontext; struct context *tcontext; struct class_datum *tclass_datum; struct constraint_node *constraint; u16 tclass; int rc = 0; if (!ss_initialized) return 0; read_lock(&policy_rwlock); tclass = unmap_class(orig_tclass); if (!tclass || tclass > 
policydb.p_classes.nprim) { printk(KERN_ERR "SELinux: %s: unrecognized class %d\n", __func__, tclass); rc = -EINVAL; goto out; } tclass_datum = policydb.class_val_to_struct[tclass - 1]; ocontext = sidtab_search(&sidtab, oldsid); if (!ocontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, oldsid); rc = -EINVAL; goto out; } ncontext = sidtab_search(&sidtab, newsid); if (!ncontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, newsid); rc = -EINVAL; goto out; } tcontext = sidtab_search(&sidtab, tasksid); if (!tcontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, tasksid); rc = -EINVAL; goto out; } constraint = tclass_datum->validatetrans; while (constraint) { if (!constraint_expr_eval(ocontext, ncontext, tcontext, constraint->expr)) { rc = security_validtrans_handle_fail(ocontext, ncontext, tcontext, tclass); goto out; } constraint = constraint->next; } out: read_unlock(&policy_rwlock); return rc; } /* * security_bounded_transition - check whether the given * transition is directed to bounded, or not. * It returns 0, if @newsid is bounded by @oldsid. * Otherwise, it returns error code. 
* * @oldsid : current security identifier * @newsid : destinated security identifier */ int security_bounded_transition(u32 old_sid, u32 new_sid) { struct context *old_context, *new_context; struct type_datum *type; int index; int rc; read_lock(&policy_rwlock); rc = -EINVAL; old_context = sidtab_search(&sidtab, old_sid); if (!old_context) { printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n", __func__, old_sid); goto out; } rc = -EINVAL; new_context = sidtab_search(&sidtab, new_sid); if (!new_context) { printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n", __func__, new_sid); goto out; } rc = 0; /* type/domain unchanged */ if (old_context->type == new_context->type) goto out; index = new_context->type; while (true) { type = flex_array_get_ptr(policydb.type_val_to_struct_array, index - 1); BUG_ON(!type); /* not bounded anymore */ rc = -EPERM; if (!type->bounds) break; /* @newsid is bounded by @oldsid */ rc = 0; if (type->bounds == old_context->type) break; index = type->bounds; } if (rc) { char *old_name = NULL; char *new_name = NULL; u32 length; if (!context_struct_to_string(old_context, &old_name, &length) && !context_struct_to_string(new_context, &new_name, &length)) { audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, "op=security_bounded_transition " "result=denied " "oldcontext=%s newcontext=%s", old_name, new_name); } kfree(new_name); kfree(old_name); } out: read_unlock(&policy_rwlock); return rc; } static void avd_init(struct av_decision *avd) { avd->allowed = 0; avd->auditallow = 0; avd->auditdeny = 0xffffffff; avd->seqno = latest_granting; avd->flags = 0; } /** * security_compute_av - Compute access vector decisions. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @avd: access vector decisions * * Compute a set of access vector decisions based on the * SID pair (@ssid, @tsid) for the permissions in @tclass. 
*/ void security_compute_av(u32 ssid, u32 tsid, u16 orig_tclass, struct av_decision *avd) { u16 tclass; struct context *scontext = NULL, *tcontext = NULL; read_lock(&policy_rwlock); avd_init(avd); if (!ss_initialized) goto allow; scontext = sidtab_search(&sidtab, ssid); if (!scontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, ssid); goto out; } /* permissive domain? */ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type)) avd->flags |= AVD_FLAGS_PERMISSIVE; tcontext = sidtab_search(&sidtab, tsid); if (!tcontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, tsid); goto out; } tclass = unmap_class(orig_tclass); if (unlikely(orig_tclass && !tclass)) { if (policydb.allow_unknown) goto allow; goto out; } context_struct_compute_av(scontext, tcontext, tclass, avd); map_decision(orig_tclass, avd, policydb.allow_unknown); out: read_unlock(&policy_rwlock); return; allow: avd->allowed = 0xffffffff; goto out; } void security_compute_av_user(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) { struct context *scontext = NULL, *tcontext = NULL; read_lock(&policy_rwlock); avd_init(avd); if (!ss_initialized) goto allow; scontext = sidtab_search(&sidtab, ssid); if (!scontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, ssid); goto out; } /* permissive domain? */ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type)) avd->flags |= AVD_FLAGS_PERMISSIVE; tcontext = sidtab_search(&sidtab, tsid); if (!tcontext) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, tsid); goto out; } if (unlikely(!tclass)) { if (policydb.allow_unknown) goto allow; goto out; } context_struct_compute_av(scontext, tcontext, tclass, avd); out: read_unlock(&policy_rwlock); return; allow: avd->allowed = 0xffffffff; goto out; } /* * Write the security context string representation of * the context structure `context' into a dynamically * allocated string of the correct size. 
Set `*scontext' * to point to this string and set `*scontext_len' to * the length of the string. */ static int context_struct_to_string(struct context *context, char **scontext, u32 *scontext_len) { char *scontextp; if (scontext) *scontext = NULL; *scontext_len = 0; if (context->len) { *scontext_len = context->len; if (scontext) { *scontext = kstrdup(context->str, GFP_ATOMIC); if (!(*scontext)) return -ENOMEM; } return 0; } /* Compute the size of the context. */ *scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1; *scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1; *scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1; *scontext_len += mls_compute_context_len(context); if (!scontext) return 0; /* Allocate space for the context; caller must free this space. */ scontextp = kmalloc(*scontext_len, GFP_ATOMIC); if (!scontextp) return -ENOMEM; *scontext = scontextp; /* * Copy the user name, role name and type name into the context. 
*/ sprintf(scontextp, "%s:%s:%s", sym_name(&policydb, SYM_USERS, context->user - 1), sym_name(&policydb, SYM_ROLES, context->role - 1), sym_name(&policydb, SYM_TYPES, context->type - 1)); scontextp += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1 + strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1 + strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)); mls_sid_to_context(context, &scontextp); *scontextp = 0; return 0; } #include "initial_sid_to_string.h" const char *security_get_initial_sid_context(u32 sid) { if (unlikely(sid > SECINITSID_NUM)) return NULL; return initial_sid_to_string[sid]; } static int security_sid_to_context_core(u32 sid, char **scontext, u32 *scontext_len, int force) { struct context *context; int rc = 0; if (scontext) *scontext = NULL; *scontext_len = 0; if (!ss_initialized) { if (sid <= SECINITSID_NUM) { char *scontextp; *scontext_len = strlen(initial_sid_to_string[sid]) + 1; if (!scontext) goto out; scontextp = kmalloc(*scontext_len, GFP_ATOMIC); if (!scontextp) { rc = -ENOMEM; goto out; } strcpy(scontextp, initial_sid_to_string[sid]); *scontext = scontextp; goto out; } printk(KERN_ERR "SELinux: %s: called before initial " "load_policy on unknown SID %d\n", __func__, sid); rc = -EINVAL; goto out; } read_lock(&policy_rwlock); if (force) context = sidtab_search_force(&sidtab, sid); else context = sidtab_search(&sidtab, sid); if (!context) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, sid); rc = -EINVAL; goto out_unlock; } rc = context_struct_to_string(context, scontext, scontext_len); out_unlock: read_unlock(&policy_rwlock); out: return rc; } /** * security_sid_to_context - Obtain a context for a given SID. * @sid: security identifier, SID * @scontext: security context * @scontext_len: length in bytes * * Write the string representation of the context associated with @sid * into a dynamically allocated string of the correct size. 
Set @scontext * to point to this string and set @scontext_len to the length of the string. */ int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len) { return security_sid_to_context_core(sid, scontext, scontext_len, 0); } int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len) { return security_sid_to_context_core(sid, scontext, scontext_len, 1); } /* * Caveat: Mutates scontext. */ static int string_to_context_struct(struct policydb *pol, struct sidtab *sidtabp, char *scontext, u32 scontext_len, struct context *ctx, u32 def_sid) { struct role_datum *role; struct type_datum *typdatum; struct user_datum *usrdatum; char *scontextp, *p, oldc; int rc = 0; context_init(ctx); /* Parse the security context. */ rc = -EINVAL; scontextp = (char *) scontext; /* Extract the user. */ p = scontextp; while (*p && *p != ':') p++; if (*p == 0) goto out; *p++ = 0; usrdatum = hashtab_search(pol->p_users.table, scontextp); if (!usrdatum) goto out; ctx->user = usrdatum->value; /* Extract role. */ scontextp = p; while (*p && *p != ':') p++; if (*p == 0) goto out; *p++ = 0; role = hashtab_search(pol->p_roles.table, scontextp); if (!role) goto out; ctx->role = role->value; /* Extract type. */ scontextp = p; while (*p && *p != ':') p++; oldc = *p; *p++ = 0; typdatum = hashtab_search(pol->p_types.table, scontextp); if (!typdatum || typdatum->attribute) goto out; ctx->type = typdatum->value; rc = mls_context_to_sid(pol, oldc, &p, ctx, sidtabp, def_sid); if (rc) goto out; rc = -EINVAL; if ((p - scontext) < scontext_len) goto out; /* Check the validity of the new context. 
*/ if (!policydb_context_isvalid(pol, ctx)) goto out; rc = 0; out: if (rc) context_destroy(ctx); return rc; } static int security_context_to_sid_core(const char *scontext, u32 scontext_len, u32 *sid, u32 def_sid, gfp_t gfp_flags, int force) { char *scontext2, *str = NULL; struct context context; int rc = 0; if (!ss_initialized) { int i; for (i = 1; i < SECINITSID_NUM; i++) { if (!strcmp(initial_sid_to_string[i], scontext)) { *sid = i; return 0; } } *sid = SECINITSID_KERNEL; return 0; } *sid = SECSID_NULL; /* Copy the string so that we can modify the copy as we parse it. */ scontext2 = kmalloc(scontext_len + 1, gfp_flags); if (!scontext2) return -ENOMEM; memcpy(scontext2, scontext, scontext_len); scontext2[scontext_len] = 0; if (force) { /* Save another copy for storing in uninterpreted form */ rc = -ENOMEM; str = kstrdup(scontext2, gfp_flags); if (!str) goto out; } read_lock(&policy_rwlock); rc = string_to_context_struct(&policydb, &sidtab, scontext2, scontext_len, &context, def_sid); if (rc == -EINVAL && force) { context.str = str; context.len = scontext_len; str = NULL; } else if (rc) goto out_unlock; rc = sidtab_context_to_sid(&sidtab, &context, sid); context_destroy(&context); out_unlock: read_unlock(&policy_rwlock); out: kfree(scontext2); kfree(str); return rc; } /** * security_context_to_sid - Obtain a SID for a given security context. * @scontext: security context * @scontext_len: length in bytes * @sid: security identifier, SID * * Obtains a SID associated with the security context that * has the string representation specified by @scontext. * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient * memory is available, or 0 on success. */ int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid) { return security_context_to_sid_core(scontext, scontext_len, sid, SECSID_NULL, GFP_KERNEL, 0); } /** * security_context_to_sid_default - Obtain a SID for a given security context, * falling back to specified default if needed. 
 *
 * @scontext: security context
 * @scontext_len: length in bytes
 * @sid: security identifier, SID
 * @def_sid: default SID to assign on error
 *
 * Obtains a SID associated with the security context that
 * has the string representation specified by @scontext.
 * The default SID is passed to the MLS layer to be used to allow
 * kernel labeling of the MLS field if the MLS field is not present
 * (for upgrading to MLS without full relabel).
 * Implicitly forces adding of the context even if it cannot be mapped yet.
 * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
 * memory is available, or 0 on success.
 */
int security_context_to_sid_default(const char *scontext, u32 scontext_len,
				    u32 *sid, u32 def_sid, gfp_t gfp_flags)
{
	return security_context_to_sid_core(scontext, scontext_len,
					    sid, def_sid, gfp_flags, 1);
}

/*
 * Like security_context_to_sid(), but forces the context to be added to
 * the SID table even if it cannot be mapped under the current policy
 * (last argument of security_context_to_sid_core() is the "force" flag).
 */
int security_context_to_sid_force(const char *scontext, u32 scontext_len,
				  u32 *sid)
{
	return security_context_to_sid_core(scontext, scontext_len,
					    sid, SECSID_NULL, GFP_KERNEL, 1);
}

/*
 * Audit-log a transition/member/change computation that produced a
 * context which is invalid under the loaded policy.  Returns 0 when
 * permissive (the caller proceeds anyway) or -EACCES when enforcing.
 */
static int compute_sid_handle_invalid_context(
	struct context *scontext,
	struct context *tcontext,
	u16 tclass,
	struct context *newcontext)
{
	char *s = NULL, *t = NULL, *n = NULL;
	u32 slen, tlen, nlen;

	if (context_struct_to_string(scontext, &s, &slen))
		goto out;
	if (context_struct_to_string(tcontext, &t, &tlen))
		goto out;
	if (context_struct_to_string(newcontext, &n, &nlen))
		goto out;
	audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
		  "security_compute_sid: invalid context %s"
		  " for scontext=%s"
		  " tcontext=%s"
		  " tclass=%s",
		  n, s, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
out:
	/* kfree(NULL) is a no-op, so partially built strings are safe. */
	kfree(s);
	kfree(t);
	kfree(n);
	if (!selinux_enforcing)
		return 0;
	return -EACCES;
}

/*
 * Apply any matching filename (type_transition with name) rule to
 * @newcontext->type.  Caller must hold policy_rwlock for reading.
 */
static void filename_compute_type(struct policydb *p, struct context *newcontext,
				  u32 stype, u32 ttype, u16 tclass,
				  const char *objname)
{
	struct filename_trans ft;
	struct filename_trans_datum *otype;

	/*
	 * Most filename trans rules are going to live in specific directories
	 * like /dev or /var/run.  This bitmap will quickly skip rule searches
	 * if the ttype does not contain any rules.
	 */
	if (!ebitmap_get_bit(&p->filename_trans_ttypes, ttype))
		return;

	ft.stype = stype;
	ft.ttype = ttype;
	ft.tclass = tclass;
	ft.name = objname;

	otype = hashtab_search(p->filename_trans, &ft);
	if (otype)
		newcontext->type = otype->otype;
}

/*
 * Common worker for security_transition_sid(), security_member_sid() and
 * security_change_sid().  @specified selects the rule kind (AVTAB_*),
 * @kern indicates whether @orig_tclass is a kernel-private class value
 * that must be unmapped to the policy's class value.
 */
static int security_compute_sid(u32 ssid,
				u32 tsid,
				u16 orig_tclass,
				u32 specified,
				const char *objname,
				u32 *out_sid,
				bool kern)
{
	struct class_datum *cladatum = NULL;
	struct context *scontext = NULL, *tcontext = NULL, newcontext;
	struct role_trans *roletr = NULL;
	struct avtab_key avkey;
	struct avtab_datum *avdatum;
	struct avtab_node *node;
	u16 tclass;
	int rc = 0;
	bool sock;

	if (!ss_initialized) {
		/* No policy loaded yet: processes keep the source SID,
		 * objects take the target (parent) SID. */
		switch (orig_tclass) {
		case SECCLASS_PROCESS: /* kernel value */
			*out_sid = ssid;
			break;
		default:
			*out_sid = tsid;
			break;
		}
		goto out;
	}

	context_init(&newcontext);

	read_lock(&policy_rwlock);

	if (kern) {
		tclass = unmap_class(orig_tclass);
		sock = security_is_socket_class(orig_tclass);
	} else {
		tclass = orig_tclass;
		sock = security_is_socket_class(map_class(tclass));
	}

	scontext = sidtab_search(&sidtab, ssid);
	if (!scontext) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
		       __func__, ssid);
		rc = -EINVAL;
		goto out_unlock;
	}
	tcontext = sidtab_search(&sidtab, tsid);
	if (!tcontext) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
		       __func__, tsid);
		rc = -EINVAL;
		goto out_unlock;
	}

	if (tclass && tclass <= policydb.p_classes.nprim)
		cladatum = policydb.class_val_to_struct[tclass - 1];

	/* Set the user identity. */
	switch (specified) {
	case AVTAB_TRANSITION:
	case AVTAB_CHANGE:
		if (cladatum && cladatum->default_user == DEFAULT_TARGET) {
			newcontext.user = tcontext->user;
		} else {
			/* notice this gets both DEFAULT_SOURCE and unset */
			/* Use the process user identity. */
			newcontext.user = scontext->user;
		}
		break;
	case AVTAB_MEMBER:
		/* Use the related object owner. */
		newcontext.user = tcontext->user;
		break;
	}

	/* Set the role to default values. */
	if (cladatum && cladatum->default_role == DEFAULT_SOURCE) {
		newcontext.role = scontext->role;
	} else if (cladatum && cladatum->default_role == DEFAULT_TARGET) {
		newcontext.role = tcontext->role;
	} else {
		if ((tclass == policydb.process_class) || (sock == true))
			newcontext.role = scontext->role;
		else
			newcontext.role = OBJECT_R_VAL;
	}

	/* Set the type to default values. */
	if (cladatum && cladatum->default_type == DEFAULT_SOURCE) {
		newcontext.type = scontext->type;
	} else if (cladatum && cladatum->default_type == DEFAULT_TARGET) {
		newcontext.type = tcontext->type;
	} else {
		if ((tclass == policydb.process_class) || (sock == true)) {
			/* Use the type of process. */
			newcontext.type = scontext->type;
		} else {
			/* Use the type of the related object. */
			newcontext.type = tcontext->type;
		}
	}

	/* Look for a type transition/member/change rule. */
	avkey.source_type = scontext->type;
	avkey.target_type = tcontext->type;
	avkey.target_class = tclass;
	avkey.specified = specified;
	avdatum = avtab_search(&policydb.te_avtab, &avkey);

	/* If no permanent rule, also check for enabled conditional rules */
	if (!avdatum) {
		node = avtab_search_node(&policydb.te_cond_avtab, &avkey);
		for (; node; node = avtab_search_node_next(node, specified)) {
			if (node->key.specified & AVTAB_ENABLED) {
				avdatum = &node->datum;
				break;
			}
		}
	}

	if (avdatum) {
		/* Use the type from the type transition/member/change rule. */
		newcontext.type = avdatum->data;
	}

	/* if we have a objname this is a file trans check so check those rules */
	if (objname)
		filename_compute_type(&policydb, &newcontext, scontext->type,
				      tcontext->type, tclass, objname);

	/* Check for class-specific changes. */
	if (specified & AVTAB_TRANSITION) {
		/* Look for a role transition rule. */
		for (roletr = policydb.role_tr; roletr; roletr = roletr->next) {
			if ((roletr->role == scontext->role) &&
			    (roletr->type == tcontext->type) &&
			    (roletr->tclass == tclass)) {
				/* Use the role transition rule. */
				newcontext.role = roletr->new_role;
				break;
			}
		}
	}

	/* Set the MLS attributes.
	   This is done last because it may allocate memory. */
	rc = mls_compute_sid(scontext, tcontext, tclass, specified,
			     &newcontext, sock);
	if (rc)
		goto out_unlock;

	/* Check the validity of the context. */
	if (!policydb_context_isvalid(&policydb, &newcontext)) {
		rc = compute_sid_handle_invalid_context(scontext, tcontext,
							tclass, &newcontext);
		if (rc)
			goto out_unlock;
	}
	/* Obtain the sid for the context. */
	rc = sidtab_context_to_sid(&sidtab, &newcontext, out_sid);
out_unlock:
	read_unlock(&policy_rwlock);
	context_destroy(&newcontext);
out:
	return rc;
}

/**
 * security_transition_sid - Compute the SID for a new subject/object.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @out_sid: security identifier for new subject/object
 *
 * Compute a SID to use for labeling a new subject or object in the
 * class @tclass based on a SID pair (@ssid, @tsid).
 * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
 * if insufficient memory is available, or %0 if the new SID was
 * computed successfully.
 */
int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
			    const struct qstr *qstr, u32 *out_sid)
{
	return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
				    qstr ? qstr->name : NULL, out_sid, true);
}

/* Userspace variant: @tclass is a policy class value, not a kernel one. */
int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
				 const char *objname, u32 *out_sid)
{
	return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
				    objname, out_sid, false);
}

/**
 * security_member_sid - Compute the SID for member selection.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @out_sid: security identifier for selected member
 *
 * Compute a SID to use when selecting a member of a polyinstantiated
 * object of class @tclass based on a SID pair (@ssid, @tsid).
 * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
 * if insufficient memory is available, or %0 if the SID was
 * computed successfully.
 */
int security_member_sid(u32 ssid,
			u32 tsid,
			u16 tclass,
			u32 *out_sid)
{
	return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, NULL,
				    out_sid, false);
}

/**
 * security_change_sid - Compute the SID for object relabeling.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @out_sid: security identifier for selected member
 *
 * Compute a SID to use for relabeling an object of class @tclass
 * based on a SID pair (@ssid, @tsid).
 * Return -%EINVAL if any of the parameters are invalid, -%ENOMEM
 * if insufficient memory is available, or %0 if the SID was
 * computed successfully.
 */
int security_change_sid(u32 ssid,
			u32 tsid,
			u16 tclass,
			u32 *out_sid)
{
	return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, NULL,
				    out_sid, false);
}

/* Clone the SID into the new SID table. */
static int clone_sid(u32 sid, struct context *context, void *arg)
{
	struct sidtab *s = arg;

	/* Initial SIDs are loaded from the new policy itself, so only
	 * clone the dynamically allocated entries. */
	if (sid > SECINITSID_NUM)
		return sidtab_insert(s, sid, context);
	else
		return 0;
}

/*
 * Decide what to do with a context that is invalid under the new policy:
 * reject it (-EINVAL) when enforcing, or log a warning and accept it
 * when permissive.
 */
static inline int convert_context_handle_invalid_context(struct context *context)
{
	char *s;
	u32 len;

	if (selinux_enforcing)
		return -EINVAL;

	if (!context_struct_to_string(context, &s, &len)) {
		printk(KERN_WARNING "SELinux: Context %s would be invalid if enforcing\n",
		       s);
		kfree(s);
	}
	return 0;
}

struct convert_context_args {
	struct policydb *oldp;	/* policy the context values refer to */
	struct policydb *newp;	/* policy to convert the values into */
};

/*
 * Convert the values in the security context
 * structure `c' from the values specified
 * in the policy `p->oldp' to the values specified
 * in the policy `p->newp'.  Verify that the
 * context is valid under the new policy.
 */
static int convert_context(u32 key,
			   struct context *c,
			   void *p)
{
	struct convert_context_args *args;
	struct context oldc;
	struct ocontext *oc;
	struct mls_range *range;
	struct role_datum *role;
	struct type_datum *typdatum;
	struct user_datum *usrdatum;
	char *s;
	u32 len;
	int rc = 0;

	/* Initial SIDs are reloaded from the new policy directly. */
	if (key <= SECINITSID_NUM)
		goto out;

	args = p;
	if (c->str) {
		/* Previously unmapped context: try to map its string
		 * representation under the new policy. */
		struct context ctx;

		rc = -ENOMEM;
		s = kstrdup(c->str, GFP_KERNEL);
		if (!s)
			goto out;

		rc = string_to_context_struct(args->newp, NULL, s,
					      c->len, &ctx, SECSID_NULL);
		kfree(s);
		if (!rc) {
			printk(KERN_INFO "SELinux: Context %s became valid (mapped).\n",
			       c->str);
			/* Replace string with mapped representation. */
			kfree(c->str);
			memcpy(c, &ctx, sizeof(*c));
			goto out;
		} else if (rc == -EINVAL) {
			/* Retain string representation for later mapping. */
			rc = 0;
			goto out;
		} else {
			/* Other error condition, e.g. ENOMEM. */
			printk(KERN_ERR "SELinux: Unable to map context %s, rc = %d.\n",
			       c->str, -rc);
			goto out;
		}
	}

	/* Keep a copy so the old string form can be saved on failure. */
	rc = context_cpy(&oldc, c);
	if (rc)
		goto out;

	/* Convert the user. */
	rc = -EINVAL;
	usrdatum = hashtab_search(args->newp->p_users.table,
				  sym_name(args->oldp, SYM_USERS, c->user - 1));
	if (!usrdatum)
		goto bad;
	c->user = usrdatum->value;

	/* Convert the role. */
	rc = -EINVAL;
	role = hashtab_search(args->newp->p_roles.table,
			      sym_name(args->oldp, SYM_ROLES, c->role - 1));
	if (!role)
		goto bad;
	c->role = role->value;

	/* Convert the type. */
	rc = -EINVAL;
	typdatum = hashtab_search(args->newp->p_types.table,
				  sym_name(args->oldp, SYM_TYPES, c->type - 1));
	if (!typdatum)
		goto bad;
	c->type = typdatum->value;

	/* Convert the MLS fields if dealing with MLS policies */
	if (args->oldp->mls_enabled && args->newp->mls_enabled) {
		rc = mls_convert_context(args->oldp, args->newp, c);
		if (rc)
			goto bad;
	} else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
		/*
		 * Switching between MLS and non-MLS policy:
		 * free any storage used by the MLS fields in the
		 * context for all existing entries in the sidtab.
		 */
		mls_context_destroy(c);
	} else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
		/*
		 * Switching between non-MLS and MLS policy:
		 * ensure that the MLS fields of the context for all
		 * existing entries in the sidtab are filled in with a
		 * suitable default value, likely taken from one of the
		 * initial SIDs.
		 */
		oc = args->newp->ocontexts[OCON_ISID];
		while (oc && oc->sid[0] != SECINITSID_UNLABELED)
			oc = oc->next;
		rc = -EINVAL;
		if (!oc) {
			printk(KERN_ERR "SELinux: unable to look up"
				" the initial SIDs list\n");
			goto bad;
		}
		range = &oc->context[0].range;
		rc = mls_range_set(c, range);
		if (rc)
			goto bad;
	}

	/* Check the validity of the new context. */
	if (!policydb_context_isvalid(args->newp, c)) {
		rc = convert_context_handle_invalid_context(&oldc);
		if (rc)
			goto bad;
	}

	context_destroy(&oldc);

	rc = 0;
out:
	return rc;
bad:
	/* Map old representation to string and save it. */
	rc = context_struct_to_string(&oldc, &s, &len);
	if (rc)
		return rc;
	context_destroy(&oldc);
	context_destroy(c);
	c->str = s;
	c->len = len;
	printk(KERN_INFO "SELinux: Context %s became invalid (unmapped).\n",
	       c->str);
	rc = 0;
	goto out;
}

/* Cache the policy capability bits from the loaded policy in globals. */
static void security_load_policycaps(void)
{
	selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps,
						  POLICYDB_CAPABILITY_NETPEER);
	selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
						  POLICYDB_CAPABILITY_OPENPERM);
	selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps,
						  POLICYDB_CAPABILITY_ALWAYSNETWORK);
}

static int security_preserve_bools(struct policydb *p);

/**
 * security_load_policy - Load a security policy configuration.
 * @data: binary policy data
 * @len: length of data in bytes
 *
 * Load a new set of security policy configuration data,
 * validate it and convert the SID table as necessary.
 * This function will flush the access vector cache after
 * loading the new policy.
 */
int security_load_policy(void *data, size_t len)
{
	struct policydb *oldpolicydb, *newpolicydb;
	struct sidtab oldsidtab, newsidtab;
	struct selinux_mapping *oldmap, *map = NULL;
	struct convert_context_args args;
	u32 seqno;
	u16 map_size;
	int rc = 0;
	struct policy_file file = { data, len }, *fp = &file;

	/* One allocation for both scratch policydbs (old at [0], new at [1]);
	 * they are too large for the stack. */
	oldpolicydb = kzalloc(2 * sizeof(*oldpolicydb), GFP_KERNEL);
	if (!oldpolicydb) {
		rc = -ENOMEM;
		goto out;
	}
	newpolicydb = oldpolicydb + 1;

	if (!ss_initialized) {
		/* First policy load: no conversion needed, just read,
		 * map the kernel classes and install the initial SIDs. */
		avtab_cache_init();
		rc = policydb_read(&policydb, fp);
		if (rc) {
			avtab_cache_destroy();
			goto out;
		}

		policydb.len = len;
		rc = selinux_set_mapping(&policydb, secclass_map,
					 &current_mapping,
					 &current_mapping_size);
		if (rc) {
			policydb_destroy(&policydb);
			avtab_cache_destroy();
			goto out;
		}

		rc = policydb_load_isids(&policydb, &sidtab);
		if (rc) {
			policydb_destroy(&policydb);
			avtab_cache_destroy();
			goto out;
		}

		security_load_policycaps();
		ss_initialized = 1;
		seqno = ++latest_granting;
		selinux_complete_init();
		avc_ss_reset(seqno);
		selnl_notify_policyload(seqno);
		selinux_status_update_policyload(seqno);
		selinux_netlbl_cache_invalidate();
		selinux_xfrm_notify_policyload();
		goto out;
	}

#if 0
	sidtab_hash_eval(&sidtab, "sids");
#endif

	/* Policy reload: read the new policy into scratch storage first. */
	rc = policydb_read(newpolicydb, fp);
	if (rc)
		goto out;

	newpolicydb->len = len;
	/* If switching between different policy types, log MLS status */
	if (policydb.mls_enabled && !newpolicydb->mls_enabled)
		printk(KERN_INFO "SELinux: Disabling MLS support...\n");
	else if (!policydb.mls_enabled && newpolicydb->mls_enabled)
		printk(KERN_INFO "SELinux: Enabling MLS support...\n");

	rc = policydb_load_isids(newpolicydb, &newsidtab);
	if (rc) {
		printk(KERN_ERR "SELinux:  unable to load the initial SIDs\n");
		policydb_destroy(newpolicydb);
		goto out;
	}

	rc = selinux_set_mapping(newpolicydb, secclass_map, &map, &map_size);
	if (rc)
		goto err;

	rc = security_preserve_bools(newpolicydb);
	if (rc) {
		printk(KERN_ERR "SELinux:  unable to preserve booleans\n");
		goto err;
	}

	/* Clone the SID table. */
	sidtab_shutdown(&sidtab);

	rc = sidtab_map(&sidtab, clone_sid, &newsidtab);
	if (rc)
		goto err;

	/*
	 * Convert the internal representations of contexts
	 * in the new SID table.
	 */
	args.oldp = &policydb;
	args.newp = newpolicydb;
	rc = sidtab_map(&newsidtab, convert_context, &args);
	if (rc) {
		printk(KERN_ERR "SELinux:  unable to convert the internal"
			" representation of contexts in the new SID"
			" table\n");
		goto err;
	}

	/* Save the old policydb and SID table to free later. */
	memcpy(oldpolicydb, &policydb, sizeof(policydb));
	sidtab_set(&oldsidtab, &sidtab);

	/* Install the new policydb and SID table. */
	write_lock_irq(&policy_rwlock);
	memcpy(&policydb, newpolicydb, sizeof(policydb));
	sidtab_set(&sidtab, &newsidtab);
	security_load_policycaps();
	oldmap = current_mapping;
	current_mapping = map;
	current_mapping_size = map_size;
	seqno = ++latest_granting;
	write_unlock_irq(&policy_rwlock);

	/* Free the old policydb and SID table. */
	policydb_destroy(oldpolicydb);
	sidtab_destroy(&oldsidtab);
	kfree(oldmap);

	avc_ss_reset(seqno);
	selnl_notify_policyload(seqno);
	selinux_status_update_policyload(seqno);
	selinux_netlbl_cache_invalidate();
	selinux_xfrm_notify_policyload();

	rc = 0;
	goto out;

err:
	kfree(map);
	sidtab_destroy(&newsidtab);
	policydb_destroy(newpolicydb);

out:
	kfree(oldpolicydb);
	return rc;
}

/* Return the byte length of the currently loaded binary policy. */
size_t security_policydb_len(void)
{
	size_t len;

	read_lock(&policy_rwlock);
	len = policydb.len;
	read_unlock(&policy_rwlock);

	return len;
}

/**
 * security_port_sid - Obtain the SID for a port.
 * @protocol: protocol number
 * @port: port number
 * @out_sid: security identifier
 */
int security_port_sid(u8 protocol, u16 port, u32 *out_sid)
{
	struct ocontext *c;
	int rc = 0;

	read_lock(&policy_rwlock);

	c = policydb.ocontexts[OCON_PORT];
	while (c) {
		if (c->u.port.protocol == protocol &&
		    c->u.port.low_port <= port &&
		    c->u.port.high_port >= port)
			break;
		c = c->next;
	}

	if (c) {
		/* SIDs are assigned lazily the first time an ocontext
		 * is matched. */
		if (!c->sid[0]) {
			rc = sidtab_context_to_sid(&sidtab,
						   &c->context[0],
						   &c->sid[0]);
			if (rc)
				goto out;
		}
		*out_sid = c->sid[0];
	} else {
		*out_sid = SECINITSID_PORT;
	}

out:
	read_unlock(&policy_rwlock);
	return rc;
}

/**
 * security_netif_sid - Obtain the SID for a network interface.
 * @name: interface name
 * @if_sid: interface SID
 */
int security_netif_sid(char *name, u32 *if_sid)
{
	int rc = 0;
	struct ocontext *c;

	read_lock(&policy_rwlock);

	c = policydb.ocontexts[OCON_NETIF];
	while (c) {
		if (strcmp(name, c->u.name) == 0)
			break;
		c = c->next;
	}

	if (c) {
		/* netif ocontexts carry two contexts (if_sid and a
		 * default message SID); resolve both lazily. */
		if (!c->sid[0] || !c->sid[1]) {
			rc = sidtab_context_to_sid(&sidtab,
						  &c->context[0],
						  &c->sid[0]);
			if (rc)
				goto out;
			rc = sidtab_context_to_sid(&sidtab,
						   &c->context[1],
						   &c->sid[1]);
			if (rc)
				goto out;
		}
		*if_sid = c->sid[0];
	} else
		*if_sid = SECINITSID_NETIF;

out:
	read_unlock(&policy_rwlock);
	return rc;
}

/* Return true when @input masked by @mask equals @addr (IPv6, 4 x u32). */
static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
{
	int i, fail = 0;

	for (i = 0; i < 4; i++)
		if (addr[i] != (input[i] & mask[i])) {
			fail = 1;
			break;
		}

	return !fail;
}

/**
 * security_node_sid - Obtain the SID for a node (host).
 * @domain: communication domain aka address family
 * @addrp: address
 * @addrlen: address length in bytes
 * @out_sid: security identifier
 */
int security_node_sid(u16 domain,
		      void *addrp,
		      u32 addrlen,
		      u32 *out_sid)
{
	int rc;
	struct ocontext *c;

	read_lock(&policy_rwlock);

	switch (domain) {
	case AF_INET: {
		u32 addr;

		rc = -EINVAL;
		if (addrlen != sizeof(u32))
			goto out;

		addr = *((u32 *)addrp);

		c = policydb.ocontexts[OCON_NODE];
		while (c) {
			if (c->u.node.addr == (addr & c->u.node.mask))
				break;
			c = c->next;
		}
		break;
	}

	case AF_INET6:
		rc = -EINVAL;
		if (addrlen != sizeof(u64) * 2)
			goto out;
		c = policydb.ocontexts[OCON_NODE6];
		while (c) {
			if (match_ipv6_addrmask(addrp, c->u.node6.addr,
						c->u.node6.mask))
				break;
			c = c->next;
		}
		break;

	default:
		/* Unknown family: not an error, just the default SID. */
		rc = 0;
		*out_sid = SECINITSID_NODE;
		goto out;
	}

	if (c) {
		if (!c->sid[0]) {
			rc = sidtab_context_to_sid(&sidtab,
						   &c->context[0],
						   &c->sid[0]);
			if (rc)
				goto out;
		}
		*out_sid = c->sid[0];
	} else {
		*out_sid = SECINITSID_NODE;
	}

	rc = 0;
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/* Initial/step size of the dynamically grown SID array below. */
#define SIDS_NEL 25

/**
 * security_get_user_sids - Obtain reachable SIDs for a user.
 * @fromsid: starting SID
 * @username: username
 * @sids: array of reachable SIDs for user
 * @nel: number of elements in @sids
 *
 * Generate the set of SIDs for legal security contexts
 * for a given user that can be reached by @fromsid.
 * Set *@sids to point to a dynamically allocated
 * array containing the set of SIDs.  Set *@nel to the
 * number of elements in the array.
 */
int security_get_user_sids(u32 fromsid,
			   char *username,
			   u32 **sids,
			   u32 *nel)
{
	struct context *fromcon, usercon;
	u32 *mysids = NULL, *mysids2, sid;
	u32 mynel = 0, maxnel = SIDS_NEL;
	struct user_datum *user;
	struct role_datum *role;
	struct ebitmap_node *rnode, *tnode;
	int rc = 0, i, j;

	*sids = NULL;
	*nel = 0;

	if (!ss_initialized)
		goto out;

	read_lock(&policy_rwlock);

	context_init(&usercon);

	rc = -EINVAL;
	fromcon = sidtab_search(&sidtab, fromsid);
	if (!fromcon)
		goto out_unlock;

	rc = -EINVAL;
	user = hashtab_search(policydb.p_users.table, username);
	if (!user)
		goto out_unlock;

	usercon.user = user->value;

	rc = -ENOMEM;
	mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
	if (!mysids)
		goto out_unlock;

	/* Enumerate every (role, type) pair legal for the user and
	 * collect the SID of each resulting valid context. */
	ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
		role = policydb.role_val_to_struct[i];
		usercon.role = i + 1;
		ebitmap_for_each_positive_bit(&role->types, tnode, j) {
			usercon.type = j + 1;

			if (mls_setup_user_range(fromcon, user, &usercon))
				continue;

			rc = sidtab_context_to_sid(&sidtab, &usercon, &sid);
			if (rc)
				goto out_unlock;
			if (mynel < maxnel) {
				mysids[mynel++] = sid;
			} else {
				/* Grow the array by SIDS_NEL entries. */
				rc = -ENOMEM;
				maxnel += SIDS_NEL;
				mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC);
				if (!mysids2)
					goto out_unlock;
				memcpy(mysids2, mysids, mynel * sizeof(*mysids2));
				kfree(mysids);
				mysids = mysids2;
				mysids[mynel++] = sid;
			}
		}
	}
	rc = 0;
out_unlock:
	read_unlock(&policy_rwlock);
	if (rc || !mynel) {
		kfree(mysids);
		goto out;
	}

	/* Filter the candidates by the transition permission, outside
	 * the policy lock since avc_has_perm_noaudit may take it. */
	rc = -ENOMEM;
	mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
	if (!mysids2) {
		kfree(mysids);
		goto out;
	}
	for (i = 0, j = 0; i < mynel; i++) {
		struct av_decision dummy_avd;
		rc = avc_has_perm_noaudit(fromsid, mysids[i],
					  SECCLASS_PROCESS, /* kernel value */
					  PROCESS__TRANSITION, AVC_STRICT,
					  &dummy_avd);
		if (!rc)
			mysids2[j++] = mysids[i];
		cond_resched();
	}
	rc = 0;
	kfree(mysids);
	*sids = mysids2;
	*nel = j;
out:
	return rc;
}

/**
 * security_genfs_sid - Obtain a SID for a file in a filesystem
 * @fstype: filesystem type
 * @path: path from root of mount
 * @sclass: file security class
 * @sid: SID for path
 *
 * Obtain a SID to use for a file in a filesystem that
 * cannot support xattr or use a fixed labeling behavior like
 * transition SIDs or task SIDs.
 */
int security_genfs_sid(const char *fstype,
		       char *path,
		       u16 orig_sclass,
		       u32 *sid)
{
	int len;
	u16 sclass;
	struct genfs *genfs;
	struct ocontext *c;
	int rc, cmp = 0;

	/* Collapse a leading "//" to "/". */
	while (path[0] == '/' && path[1] == '/')
		path++;

	read_lock(&policy_rwlock);

	sclass = unmap_class(orig_sclass);
	*sid = SECINITSID_UNLABELED;

	/* genfs list is sorted by fstype, so stop at the first
	 * entry that is not less than @fstype. */
	for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
		cmp = strcmp(fstype, genfs->fstype);
		if (cmp <= 0)
			break;
	}

	rc = -ENOENT;
	if (!genfs || cmp)
		goto out;

	/* First entry whose path is a prefix of @path and whose class
	 * matches (or is a wildcard) wins. */
	for (c = genfs->head; c; c = c->next) {
		len = strlen(c->u.name);
		if ((!c->v.sclass || sclass == c->v.sclass) &&
		    (strncmp(c->u.name, path, len) == 0))
			break;
	}

	rc = -ENOENT;
	if (!c)
		goto out;

	if (!c->sid[0]) {
		rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]);
		if (rc)
			goto out;
	}

	*sid = c->sid[0];
	rc = 0;
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/**
 * security_fs_use - Determine how to handle labeling for a filesystem.
 * @sb: superblock in question
 */
int security_fs_use(struct super_block *sb)
{
	int rc = 0;
	struct ocontext *c;
	struct superblock_security_struct *sbsec = sb->s_security;
	const char *fstype = sb->s_type->name;

	read_lock(&policy_rwlock);

	c = policydb.ocontexts[OCON_FSUSE];
	while (c) {
		if (strcmp(fstype, c->u.name) == 0)
			break;
		c = c->next;
	}

	if (c) {
		/* Explicit fs_use_* statement for this fstype. */
		sbsec->behavior = c->v.behavior;
		if (!c->sid[0]) {
			rc = sidtab_context_to_sid(&sidtab, &c->context[0],
						   &c->sid[0]);
			if (rc)
				goto out;
		}
		sbsec->sid = c->sid[0];
	} else {
		/* Fall back to a genfscon entry for the root directory. */
		rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, &sbsec->sid);
		if (rc) {
			sbsec->behavior = SECURITY_FS_USE_NONE;
			rc = 0;
		} else {
			sbsec->behavior = SECURITY_FS_USE_GENFS;
		}
	}

out:
	read_unlock(&policy_rwlock);
	return rc;
}

/*
 * Return the names and current values of all policy booleans.  On
 * success the caller owns *names (and each string in it) and *values.
 * NOTE(review): on the error path only the individual name strings and
 * *values are freed, not the *names array itself — callers must treat a
 * non-zero return as "nothing to free"; verify against sel_write_bool
 * users before changing.
 */
int security_get_bools(int *len, char ***names, int **values)
{
	int i, rc;

	read_lock(&policy_rwlock);
	*names = NULL;
	*values = NULL;

	rc = 0;
	*len = policydb.p_bools.nprim;
	if (!*len)
		goto out;

	rc = -ENOMEM;
	*names = kcalloc(*len, sizeof(char *), GFP_ATOMIC);
	if (!*names)
		goto err;

	rc = -ENOMEM;
	*values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
	if (!*values)
		goto err;

	for (i = 0; i < *len; i++) {
		size_t name_len;

		(*values)[i] = policydb.bool_val_to_struct[i]->state;
		name_len = strlen(sym_name(&policydb, SYM_BOOLS, i)) + 1;

		rc = -ENOMEM;
		(*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
		if (!(*names)[i])
			goto err;

		strncpy((*names)[i], sym_name(&policydb, SYM_BOOLS, i), name_len);
		(*names)[i][name_len - 1] = 0;
	}
	rc = 0;
out:
	read_unlock(&policy_rwlock);
	return rc;
err:
	if (*names) {
		for (i = 0; i < *len; i++)
			kfree((*names)[i]);
	}
	kfree(*values);
	goto out;
}

/*
 * Set the state of all policy booleans from @values (length @len must
 * match the number of booleans), re-evaluate the conditional policy
 * and, on success, reset the AVC and notify policy-load listeners.
 */
int security_set_bools(int len, int *values)
{
	int i, rc;
	int lenp, seqno = 0;
	struct cond_node *cur;

	write_lock_irq(&policy_rwlock);

	rc = -EFAULT;
	lenp = policydb.p_bools.nprim;
	if (len != lenp)
		goto out;

	for (i = 0; i < len; i++) {
		if (!!values[i] != policydb.bool_val_to_struct[i]->state) {
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_MAC_CONFIG_CHANGE,
				"bool=%s val=%d old_val=%d auid=%u ses=%u",
				sym_name(&policydb, SYM_BOOLS, i),
				!!values[i],
				policydb.bool_val_to_struct[i]->state,
				from_kuid(&init_user_ns, audit_get_loginuid(current)),
				audit_get_sessionid(current));
		}
		if (values[i])
			policydb.bool_val_to_struct[i]->state = 1;
		else
			policydb.bool_val_to_struct[i]->state = 0;
	}

	/* Re-evaluate every conditional expression with the new values. */
	for (cur = policydb.cond_list; cur; cur = cur->next) {
		rc = evaluate_cond_node(&policydb, cur);
		if (rc)
			goto out;
	}

	seqno = ++latest_granting;
	rc = 0;
out:
	write_unlock_irq(&policy_rwlock);
	if (!rc) {
		avc_ss_reset(seqno);
		selnl_notify_policyload(seqno);
		selinux_status_update_policyload(seqno);
		selinux_xfrm_notify_policyload();
	}
	return rc;
}

/* Return the state (0/1) of boolean index @bool, or -EFAULT if out of range. */
int security_get_bool_value(int bool)
{
	int rc;
	int len;

	read_lock(&policy_rwlock);

	rc = -EFAULT;
	len = policydb.p_bools.nprim;
	if (bool >= len)
		goto out;

	rc = policydb.bool_val_to_struct[bool]->state;
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/*
 * Copy the current boolean states into the freshly read policy @p
 * (matching by name) and re-evaluate its conditional rules, so a
 * policy reload does not reset runtime boolean settings.
 */
static int security_preserve_bools(struct policydb *p)
{
	int rc, nbools = 0, *bvalues = NULL, i;
	char **bnames = NULL;
	struct cond_bool_datum *booldatum;
	struct cond_node *cur;

	rc = security_get_bools(&nbools, &bnames, &bvalues);
	if (rc)
		goto out;
	for (i = 0; i < nbools; i++) {
		booldatum = hashtab_search(p->p_bools.table, bnames[i]);
		if (booldatum)
			booldatum->state = bvalues[i];
	}
	for (cur = p->cond_list; cur; cur = cur->next) {
		rc = evaluate_cond_node(p, cur);
		if (rc)
			goto out;
	}

out:
	if (bnames) {
		for (i = 0; i < nbools; i++)
			kfree(bnames[i]);
	}
	kfree(bnames);
	kfree(bvalues);
	return rc;
}

/*
 * security_sid_mls_copy() - computes a new sid based on the given
 * sid and the mls portion of mls_sid.
 */
int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
{
	struct context *context1;
	struct context *context2;
	struct context newcon;
	char *s;
	u32 len;
	int rc;

	rc = 0;
	if (!ss_initialized || !policydb.mls_enabled) {
		/* Without MLS there is nothing to copy. */
		*new_sid = sid;
		goto out;
	}

	context_init(&newcon);

	read_lock(&policy_rwlock);

	rc = -EINVAL;
	context1 = sidtab_search(&sidtab, sid);
	if (!context1) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
			__func__, sid);
		goto out_unlock;
	}

	rc = -EINVAL;
	context2 = sidtab_search(&sidtab, mls_sid);
	if (!context2) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
			__func__, mls_sid);
		goto out_unlock;
	}

	/* user/role/type from @sid, MLS range from @mls_sid. */
	newcon.user = context1->user;
	newcon.role = context1->role;
	newcon.type = context1->type;
	rc = mls_context_cpy(&newcon, context2);
	if (rc)
		goto out_unlock;

	/* Check the validity of the new context. */
	if (!policydb_context_isvalid(&policydb, &newcon)) {
		rc = convert_context_handle_invalid_context(&newcon);
		if (rc) {
			if (!context_struct_to_string(&newcon, &s, &len)) {
				audit_log(current->audit_context,
					  GFP_ATOMIC, AUDIT_SELINUX_ERR,
					  "security_sid_mls_copy: invalid context %s", s);
				kfree(s);
			}
			goto out_unlock;
		}
	}

	rc = sidtab_context_to_sid(&sidtab, &newcon, new_sid);
out_unlock:
	read_unlock(&policy_rwlock);
	context_destroy(&newcon);
out:
	return rc;
}

/**
 * security_net_peersid_resolve - Compare and resolve two network peer SIDs
 * @nlbl_sid: NetLabel SID
 * @nlbl_type: NetLabel labeling protocol type
 * @xfrm_sid: XFRM SID
 *
 * Description:
 * Compare the @nlbl_sid and @xfrm_sid values and if the two SIDs can be
 * resolved into a single SID it is returned via @peer_sid and the function
 * returns zero.  Otherwise @peer_sid is set to SECSID_NULL and the function
 * returns a negative value.  A table summarizing the behavior is below:
 *
 *                                 | function return |      @sid
 *   ------------------------------+-----------------+-----------------
 *   no peer labels                |        0        |    SECSID_NULL
 *   single peer label             |        0        |    <peer_label>
 *   multiple, consistent labels   |        0        |    <peer_label>
 *   multiple, inconsistent labels |    -<errno>     |    SECSID_NULL
 *
 */
int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
				 u32 xfrm_sid,
				 u32 *peer_sid)
{
	int rc;
	struct context *nlbl_ctx;
	struct context *xfrm_ctx;

	*peer_sid = SECSID_NULL;

	/* handle the common (which also happens to be the set of easy) cases
	 * right away, these two if statements catch everything involving a
	 * single or absent peer SID/label */
	if (xfrm_sid == SECSID_NULL) {
		*peer_sid = nlbl_sid;
		return 0;
	}
	/* NOTE: an nlbl_type == NETLBL_NLTYPE_UNLABELED is a "fallback" label
	 * and is treated as if nlbl_sid == SECSID_NULL when a XFRM SID/label
	 * is present */
	if (nlbl_sid == SECSID_NULL || nlbl_type == NETLBL_NLTYPE_UNLABELED) {
		*peer_sid = xfrm_sid;
		return 0;
	}

	/* we don't need to check ss_initialized here since the only way both
	 * nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
	 * security server was initialized and ss_initialized was true */
	if (!policydb.mls_enabled)
		return 0;

	read_lock(&policy_rwlock);

	rc = -EINVAL;
	nlbl_ctx = sidtab_search(&sidtab, nlbl_sid);
	if (!nlbl_ctx) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
		       __func__, nlbl_sid);
		goto out;
	}
	rc = -EINVAL;
	xfrm_ctx = sidtab_search(&sidtab, xfrm_sid);
	if (!xfrm_ctx) {
		printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
		       __func__, xfrm_sid);
		goto out;
	}
	rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
	if (rc)
		goto out;

	/* at present NetLabel SIDs/labels really only carry MLS
	 * information so if the MLS portion of the NetLabel SID
	 * matches the MLS portion of the labeled XFRM SID/label
	 * then pass along the XFRM SID as it is the most
	 * expressive */
	*peer_sid = xfrm_sid;
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/* hashtab_map callback: copy each class name into the array at its value-1. */
static int get_classes_callback(void *k, void *d, void *args)
{
	struct class_datum *datum = d;
	char *name = k, **classes = args;
	int value = datum->value - 1;

	classes[value] = kstrdup(name, GFP_ATOMIC);
	if (!classes[value])
		return -ENOMEM;
	return 0;
}

/*
 * Return an array of all class names defined by the policy; on success
 * the caller owns *classes and each string within it.
 */
int security_get_classes(char ***classes, int *nclasses)
{
	int rc;

	read_lock(&policy_rwlock);

	rc = -ENOMEM;
	*nclasses = policydb.p_classes.nprim;
	*classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
	if (!*classes)
		goto out;

	rc = hashtab_map(policydb.p_classes.table, get_classes_callback,
			*classes);
	if (rc) {
		int i;
		for (i = 0; i < *nclasses; i++)
			kfree((*classes)[i]);
		kfree(*classes);
	}

out:
	read_unlock(&policy_rwlock);
	return rc;
}

/* hashtab_map callback: copy each permission name into the array at value-1. */
static int get_permissions_callback(void *k, void *d, void *args)
{
	struct perm_datum *datum = d;
	char *name = k, **perms = args;
	int value = datum->value - 1;

	perms[value] = kstrdup(name, GFP_ATOMIC);
	if (!perms[value])
		return -ENOMEM;
	return 0;
}

/*
 * Return an array of permission names for @class, including any
 * inherited from the class's common; caller owns *perms on success.
 */
int security_get_permissions(char *class, char ***perms, int *nperms)
{
	int rc, i;
	struct class_datum *match;

	read_lock(&policy_rwlock);

	rc = -EINVAL;
	match = hashtab_search(policydb.p_classes.table, class);
	if (!match) {
		printk(KERN_ERR "SELinux: %s: unrecognized class %s\n",
			__func__, class);
		goto out;
	}

	rc = -ENOMEM;
	*nperms = match->permissions.nprim;
	*perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
	if (!*perms)
		goto out;

	if (match->comdatum) {
		rc = hashtab_map(match->comdatum->permissions.table,
				get_permissions_callback, *perms);
		if (rc)
			goto err;
	}

	rc = hashtab_map(match->permissions.table, get_permissions_callback,
			*perms);
	if (rc)
		goto err;

out:
	read_unlock(&policy_rwlock);
	return rc;

err:
	read_unlock(&policy_rwlock);
	for (i = 0; i < *nperms; i++)
		kfree((*perms)[i]);
	kfree(*perms);
	return rc;
}

/* How the policy says to handle permission checks on unknown classes/perms. */
int security_get_reject_unknown(void)
{
	return policydb.reject_unknown;
}

int security_get_allow_unknown(void)
{
	return policydb.allow_unknown;
}

/**
 * security_policycap_supported - Check for a specific policy capability
 * @req_cap: capability
 *
 * Description:
 * This function queries the currently loaded policy to see if it supports the
 * capability specified by @req_cap.  Returns true (1) if the capability is
 * supported, false (0) if it isn't supported.
 *
 */
int security_policycap_supported(unsigned int req_cap)
{
	int rc;

	read_lock(&policy_rwlock);
	rc = ebitmap_get_bit(&policydb.policycaps, req_cap);
	read_unlock(&policy_rwlock);

	return rc;
}

struct selinux_audit_rule {
	u32 au_seqno;		/* policy sequence the rule was built against */
	struct context au_ctxt;	/* context values to match against */
};

void selinux_audit_rule_free(void *vrule)
{
	struct selinux_audit_rule *rule = vrule;

	if (rule) {
		context_destroy(&rule->au_ctxt);
		kfree(rule);
	}
}

/*
 * Build a selinux_audit_rule for audit filtering: resolve @rulestr
 * (a user/role/type name or MLS level, depending on @field) against
 * the current policy.  Caller frees via selinux_audit_rule_free().
 */
int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
{
	struct selinux_audit_rule *tmprule;
	struct role_datum *roledatum;
	struct type_datum *typedatum;
	struct user_datum *userdatum;
	struct selinux_audit_rule **rule = (struct selinux_audit_rule **)vrule;
	int rc = 0;

	*rule = NULL;

	if (!ss_initialized)
		return -EOPNOTSUPP;

	switch (field) {
	case AUDIT_SUBJ_USER:
	case AUDIT_SUBJ_ROLE:
	case AUDIT_SUBJ_TYPE:
	case AUDIT_OBJ_USER:
	case AUDIT_OBJ_ROLE:
	case AUDIT_OBJ_TYPE:
		/* only 'equals' and 'not equals' fit user, role, and type */
		if (op != Audit_equal && op != Audit_not_equal)
			return -EINVAL;
		break;
	case AUDIT_SUBJ_SEN:
	case AUDIT_SUBJ_CLR:
	case AUDIT_OBJ_LEV_LOW:
	case AUDIT_OBJ_LEV_HIGH:
		/* we do not allow a range, indicated by the presence of '-' */
		if (strchr(rulestr, '-'))
			return -EINVAL;
		break;
	default:
		/* only the above fields are valid */
		return -EINVAL;
	}

	tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
	if (!tmprule)
		return -ENOMEM;

	context_init(&tmprule->au_ctxt);

	read_lock(&policy_rwlock);

	/* Record the policy generation so matches can detect staleness. */
	tmprule->au_seqno = latest_granting;

	switch (field) {
	case AUDIT_SUBJ_USER:
	case AUDIT_OBJ_USER:
		rc = -EINVAL;
		userdatum = hashtab_search(policydb.p_users.table, rulestr);
		if (!userdatum)
			goto out;
		tmprule->au_ctxt.user = userdatum->value;
		break;
	case AUDIT_SUBJ_ROLE:
	case AUDIT_OBJ_ROLE:
		rc = -EINVAL;
		roledatum = hashtab_search(policydb.p_roles.table, rulestr);
		if (!roledatum)
			goto out;
		tmprule->au_ctxt.role = roledatum->value;
		break;
	case AUDIT_SUBJ_TYPE:
	case AUDIT_OBJ_TYPE:
		rc = -EINVAL;
		typedatum = hashtab_search(policydb.p_types.table, rulestr);
		if (!typedatum)
			goto out;
		tmprule->au_ctxt.type = typedatum->value;
		break;
	case AUDIT_SUBJ_SEN:
	case AUDIT_SUBJ_CLR:
	case AUDIT_OBJ_LEV_LOW:
	case AUDIT_OBJ_LEV_HIGH:
		rc = mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC);
		if (rc)
			goto out;
		break;
	}
	rc = 0;
out:
	read_unlock(&policy_rwlock);

	if (rc) {
		selinux_audit_rule_free(tmprule);
		tmprule = NULL;
	}

	*rule = tmprule;

	return rc;
}

/* Check to see if the rule contains any selinux fields */
int selinux_audit_rule_known(struct audit_krule *rule)
{
	int i;

	for (i = 0; i < rule->field_count; i++) {
		struct audit_field *f = &rule->fields[i];
		switch (f->type) {
		case AUDIT_SUBJ_USER:
		case AUDIT_SUBJ_ROLE:
		case AUDIT_SUBJ_TYPE:
		case AUDIT_SUBJ_SEN:
		case AUDIT_SUBJ_CLR:
		case AUDIT_OBJ_USER:
		case AUDIT_OBJ_ROLE:
		case AUDIT_OBJ_TYPE:
		case AUDIT_OBJ_LEV_LOW:
		case AUDIT_OBJ_LEV_HIGH:
			return 1;
		}
	}

	return 0;
}

/*
 * Test whether @sid's context matches the precompiled audit rule
 * @vrule for the given @field/@op.  Returns 1 on match, 0 on no
 * match, or a negative errno (missing/stale rule, unknown SID).
 */
int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
			     struct audit_context *actx)
{
	struct context *ctxt;
	struct mls_level *level;
	struct selinux_audit_rule *rule = vrule;
	int match = 0;

	if (!rule) {
		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "selinux_audit_rule_match: missing rule\n");
		return -ENOENT;
	}

	read_lock(&policy_rwlock);

	if (rule->au_seqno < latest_granting) {
		/* Rule was compiled against an older policy. */
		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "selinux_audit_rule_match: stale rule\n");
		match = -ESTALE;
		goto out;
	}

	ctxt = sidtab_search(&sidtab, sid);
	if (!ctxt) {
		audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
			  "selinux_audit_rule_match: unrecognized SID %d\n",
			  sid);
		match = -ENOENT;
		goto out;
	}

	/* a field/op pair that is not caught here will simply fall through
	   without a match */
	switch (field) {
	case AUDIT_SUBJ_USER:
	case AUDIT_OBJ_USER:
		switch (op) {
		case Audit_equal:
			match = (ctxt->user == rule->au_ctxt.user);
			break;
		case Audit_not_equal:
			match = (ctxt->user != rule->au_ctxt.user);
			break;
		}
		break;
	case AUDIT_SUBJ_ROLE:
	case AUDIT_OBJ_ROLE:
		switch (op) {
		case Audit_equal:
			match = (ctxt->role == rule->au_ctxt.role);
			break;
		case Audit_not_equal:
			match = (ctxt->role != rule->au_ctxt.role);
			break;
		}
		break;
	case AUDIT_SUBJ_TYPE:
	case AUDIT_OBJ_TYPE:
		switch (op) {
		case Audit_equal:
			match = (ctxt->type == rule->au_ctxt.type);
			break;
		case Audit_not_equal:
			match = (ctxt->type != rule->au_ctxt.type);
			break;
		}
		break;
	case AUDIT_SUBJ_SEN:
	case AUDIT_SUBJ_CLR:
	case AUDIT_OBJ_LEV_LOW:
	case AUDIT_OBJ_LEV_HIGH:
		/* SEN/LEV_LOW compare the low level, CLR/LEV_HIGH the high. */
		level = ((field == AUDIT_SUBJ_SEN ||
			  field == AUDIT_OBJ_LEV_LOW) ?
			 &ctxt->range.level[0] : &ctxt->range.level[1]);
		switch (op) {
		case Audit_equal:
			match = mls_level_eq(&rule->au_ctxt.range.level[0],
					     level);
			break;
		case Audit_not_equal:
			match = !mls_level_eq(&rule->au_ctxt.range.level[0],
					      level);
			break;
		case Audit_lt:
			match = (mls_level_dom(&rule->au_ctxt.range.level[0],
					       level) &&
				 !mls_level_eq(&rule->au_ctxt.range.level[0],
					       level));
			break;
		case Audit_le:
			match = mls_level_dom(&rule->au_ctxt.range.level[0],
					      level);
			break;
		case Audit_gt:
			match = (mls_level_dom(level,
					      &rule->au_ctxt.range.level[0]) &&
				 !mls_level_eq(level,
					       &rule->au_ctxt.range.level[0]));
			break;
		case Audit_ge:
			match = mls_level_dom(level,
					      &rule->au_ctxt.range.level[0]);
			break;
		}
	}

out:
	read_unlock(&policy_rwlock);
	return match;
}

static int (*aurule_callback)(void) = audit_update_lsm_rules;

/* AVC reset callback: ask the audit subsystem to recompile LSM rules. */
static int aurule_avc_callback(u32 event)
{
	int err = 0;

	if (event == AVC_CALLBACK_RESET && aurule_callback)
		err = aurule_callback();
	return err;
}

static int __init aurule_init(void)
{
	int err;

	err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET);
	if (err)
		panic("avc_add_callback() failed, error %d\n", err);

	return err;
}
__initcall(aurule_init);

#ifdef CONFIG_NETLABEL
/**
 * security_netlbl_cache_add - Add an entry to the NetLabel cache
 * @secattr: the NetLabel packet security attributes
 * @sid: the SELinux SID
 *
 * Description:
 * Attempt to cache the context in @ctx, which was derived from the packet in
 * @skb, in the NetLabel subsystem cache.  This function assumes @secattr has
 * already been initialized.
 *
 */
static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
				      u32 sid)
{
	u32 *sid_cache;

	/* Best-effort: on any allocation failure just skip caching. */
	sid_cache = kmalloc(sizeof(*sid_cache), GFP_ATOMIC);
	if (sid_cache == NULL)
		return;
	secattr->cache = netlbl_secattr_cache_alloc(GFP_ATOMIC);
	if (secattr->cache == NULL) {
		kfree(sid_cache);
		return;
	}

	*sid_cache = sid;
	/* kfree() is the matching destructor for the kmalloc() above;
	 * NetLabel calls it when the cache entry is evicted. */
	secattr->cache->free = kfree;
	secattr->cache->data = sid_cache;
	secattr->flags |= NETLBL_SECATTR_CACHE;
}

/**
 * security_netlbl_secattr_to_sid - Convert a NetLabel secattr to a SELinux SID
 * @secattr: the NetLabel packet security attributes
 * @sid: the SELinux SID
 *
 * Description:
 * Convert the given NetLabel security attributes in @secattr into a
 * SELinux SID.  If the @secattr field does not contain a full SELinux
 * SID/context then use SECINITSID_NETMSG as the foundation.  If possible the
 * 'cache' field of @secattr is set and the CACHE flag is set; this is to
 * allow the @secattr to be used by NetLabel to cache the secattr to SID
 * conversion for future lookups.  Returns zero on success, negative values on
 * failure.
 *
 */
int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
				   u32 *sid)
{
	int rc;
	struct context *ctx;
	struct context ctx_new;

	/* Before the policy is loaded every packet maps to the NULL SID. */
	if (!ss_initialized) {
		*sid = SECSID_NULL;
		return 0;
	}

	read_lock(&policy_rwlock);

	if (secattr->flags & NETLBL_SECATTR_CACHE)
		/* NOTE(review): the cached SID is used without comparing it
		 * against the current policy generation -- presumably the
		 * NetLabel cache is invalidated on policy reload; confirm. */
		*sid = *(u32 *)secattr->cache->data;
	else if (secattr->flags & NETLBL_SECATTR_SECID)
		*sid = secattr->attr.secid;
	else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
		rc = -EIDRM;
		ctx = sidtab_search(&sidtab, SECINITSID_NETMSG);
		if (ctx == NULL)
			goto out;

		/* Build a new context: user/role/type come from the NETMSG
		 * initial SID, the MLS attributes from the packet. */
		context_init(&ctx_new);
		ctx_new.user = ctx->user;
		ctx_new.role = ctx->role;
		ctx_new.type = ctx->type;
		mls_import_netlbl_lvl(&ctx_new, secattr);
		if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
			rc = ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
						   secattr->attr.mls.cat);
			if (rc)
				goto out;

			/* Single-level context: high = low.  NOTE(review):
			 * this memcpy() makes a shallow copy of the ebitmap
			 * struct, so both levels share node storage and only
			 * level[0].cat is ever destroyed below -- confirm
			 * ebitmap_destroy() semantics make this safe. */
			memcpy(&ctx_new.range.level[1].cat,
			       &ctx_new.range.level[0].cat,
			       sizeof(ctx_new.range.level[0].cat));
		}
		rc = -EIDRM;
		if (!mls_context_isvalid(&policydb, &ctx_new))
			goto out_free;

		rc = sidtab_context_to_sid(&sidtab, &ctx_new, sid);
		if (rc)
			goto out_free;

		/* Cache the mapping so future packets with the same label
		 * skip the conversion. */
		security_netlbl_cache_add(secattr, *sid);

		ebitmap_destroy(&ctx_new.range.level[0].cat);
	} else
		*sid = SECSID_NULL;

	read_unlock(&policy_rwlock);
	return 0;
out_free:
	ebitmap_destroy(&ctx_new.range.level[0].cat);
out:
	read_unlock(&policy_rwlock);
	return rc;
}

/**
 * security_netlbl_sid_to_secattr - Convert a SELinux SID to a NetLabel secattr
 * @sid: the SELinux SID
 * @secattr: the NetLabel packet security attributes
 *
 * Description:
 * Convert the given SELinux SID in @sid into a NetLabel security attribute.
 * Returns zero on success, negative values on failure.
 *
 */
int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr)
{
	int rc;
	struct context *ctx;

	/* Nothing to export before the policy is loaded; @secattr is left
	 * untouched and success is returned. */
	if (!ss_initialized)
		return 0;

	read_lock(&policy_rwlock);

	rc = -ENOENT;
	ctx = sidtab_search(&sidtab, sid);
	if (ctx == NULL)
		goto out;

	rc = -ENOMEM;
	/* GFP_ATOMIC because the policy read lock is held. */
	secattr->domain = kstrdup(sym_name(&policydb, SYM_TYPES, ctx->type - 1),
				  GFP_ATOMIC);
	if (secattr->domain == NULL)
		goto out;

	secattr->attr.secid = sid;
	secattr->flags |= NETLBL_SECATTR_DOMAIN_CPY | NETLBL_SECATTR_SECID;
	mls_export_netlbl_lvl(ctx, secattr);
	rc = mls_export_netlbl_cat(ctx, secattr);
out:
	read_unlock(&policy_rwlock);
	return rc;
}
#endif /* CONFIG_NETLABEL */

/**
 * security_read_policy - read the policy.
 * @data: binary policy data
 * @len: length of data in bytes
 *
 */
int security_read_policy(void **data, size_t *len)
{
	int rc;
	struct policy_file fp;

	if (!ss_initialized)
		return -EINVAL;

	/* NOTE(review): the length is sampled here, before the read lock
	 * below is taken; if a larger policy could be loaded in between,
	 * policydb_write() would overrun the buffer -- confirm that callers
	 * serialize against policy load. */
	*len = security_policydb_len();

	*data = vmalloc_user(*len);
	if (!*data)
		return -ENOMEM;

	fp.data = *data;
	fp.len = *len;

	read_lock(&policy_rwlock);
	rc = policydb_write(&policydb, &fp);
	read_unlock(&policy_rwlock);

	if (rc)
		return rc;

	/* fp.data was advanced by policydb_write(); the difference is the
	 * number of bytes actually written. */
	*len = (unsigned long)fp.data - (unsigned long)*data;
	return 0;
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2080_0
crossvul-cpp_data_bad_5845_18
/*
 * net/key/af_key.c	An implementation of PF_KEYv2 sockets.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Maxim Giryaev	<gem@asplinux.ru>
 *		David S. Miller	<davem@redhat.com>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		Kazunori MIYAZAWA / USAGI Project <miyazawa@linux-ipv6.org>
 *		Derek Atkins <derek@ihtfp.com>
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/xfrm.h>

#include <net/sock.h>

/* Lifetime values: XFRM_INF (infinite) maps to the PF_KEY value 0 and
 * vice versa. */
#define _X2KEY(x) ((x) == XFRM_INF ? 0 : (x))
#define _KEY2X(x) ((x) == 0 ? XFRM_INF : (x))

static int pfkey_net_id __read_mostly;
struct netns_pfkey {
	/* List of all pfkey sockets. */
	struct hlist_head table;
	atomic_t socks_nr;
};
/* Serializes additions/removals on netns_pfkey.table; readers use RCU. */
static DEFINE_MUTEX(pfkey_mutex);

#define DUMMY_MARK 0
static const struct xfrm_mark dummy_mark = {0, 0};
struct pfkey_sock {
	/* struct sock must be the first member of struct pfkey_sock */
	struct sock	sk;
	int		registered;
	int		promisc;

	/* In-progress SADB_DUMP / SPDDUMP state for this socket. */
	struct {
		uint8_t		msg_version;
		uint32_t	msg_portid;
		int		(*dump)(struct pfkey_sock *sk);
		void		(*done)(struct pfkey_sock *sk);
		union {
			struct xfrm_policy_walk	policy;
			struct xfrm_state_walk	state;
		} u;
		struct sk_buff	*skb;
	} dump;
};

static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
{
	return (struct pfkey_sock *)sk;
}

/* Throttle dumps: only continue while the receive queue is below 2/3 of
 * the receive buffer. */
static int pfkey_can_dump(const struct sock *sk)
{
	if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf)
		return 1;
	return 0;
}

/* Abort an in-progress dump and release its pending skb and walk state. */
static void pfkey_terminate_dump(struct pfkey_sock *pfk)
{
	if (pfk->dump.dump) {
		if (pfk->dump.skb) {
			kfree_skb(pfk->dump.skb);
			pfk->dump.skb = NULL;
		}
		pfk->dump.done(pfk);
		pfk->dump.dump = NULL;
		pfk->dump.done = NULL;
	}
}

static void pfkey_sock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);

	pfkey_terminate_dump(pfkey_sk(sk));
	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive pfkey socket: %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	atomic_dec(&net_pfkey->socks_nr);
}

static const struct proto_ops pfkey_ops;

static void pfkey_insert(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);

	mutex_lock(&pfkey_mutex);
	sk_add_node_rcu(sk, &net_pfkey->table);
	mutex_unlock(&pfkey_mutex);
}

static void pfkey_remove(struct sock *sk)
{
	mutex_lock(&pfkey_mutex);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&pfkey_mutex);
}

static struct proto key_proto = {
	.name	  = "KEY",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct pfkey_sock),
};

/*
 * Create a PF_KEY socket.  Requires CAP_NET_ADMIN in the netns; only
 * SOCK_RAW / PF_KEY_V2 is supported.
 */
static int pfkey_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
	struct sock *sk;
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;
	if (protocol != PF_KEY_V2)
		return -EPROTONOSUPPORT;

	err = -ENOMEM;
	sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &pfkey_ops;
	sock_init_data(sock, sk);

	sk->sk_family = PF_KEY;
	sk->sk_destruct = pfkey_sock_destruct;

	atomic_inc(&net_pfkey->socks_nr);

	pfkey_insert(sk);

	return 0;
out:
	return err;
}

static int pfkey_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	pfkey_remove(sk);

	sock_orphan(sk);
	sock->sk = NULL;
	skb_queue_purge(&sk->sk_write_queue);

	/* Wait for RCU readers walking the socket table before the final
	 * reference is dropped. */
	synchronize_rcu();
	sock_put(sk);

	return 0;
}

/*
 * Deliver @skb to one socket.  *skb2 is a lazily created private copy
 * shared across successive calls by the broadcast loop; it is consumed
 * (set to NULL) on successful queueing.
 */
static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
			       gfp_t allocation, struct sock *sk)
{
	int err = -ENOBUFS;

	sock_hold(sk);
	if (*skb2 == NULL) {
		if (atomic_read(&skb->users) != 1) {
			*skb2 = skb_clone(skb, allocation);
		} else {
			*skb2 = skb;
			atomic_inc(&skb->users);
		}
	}
	if (*skb2 != NULL) {
		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
			skb_set_owner_r(*skb2, sk);
			skb_queue_tail(&sk->sk_receive_queue, *skb2);
			sk->sk_data_ready(sk, (*skb2)->len);
			*skb2 = NULL;
			err = 0;
		}
	}
	sock_put(sk);
	return err;
}

/* Send SKB to all pfkey sockets matching selected criteria.  */
#define BROADCAST_ALL		0
#define BROADCAST_ONE		1
#define BROADCAST_REGISTERED	2
#define BROADCAST_PROMISC_ONLY	4
static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
			   int broadcast_flags, struct sock *one_sk,
			   struct net *net)
{
	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
	struct sock *sk;
	struct sk_buff *skb2 = NULL;
	int err = -ESRCH;

	/* XXX Do we need something like netlink_overrun?  I think
	 * XXX PF_KEY socket apps will not mind current behavior.
	 */
	if (!skb)
		return -ENOMEM;

	/* NOTE(review): pfkey_broadcast_one() is invoked below with the
	 * caller-supplied @allocation while rcu_read_lock() is held;
	 * callers passing GFP_KERNEL would sleep in an atomic section --
	 * confirm all call sites use GFP_ATOMIC here. */
	rcu_read_lock();
	sk_for_each_rcu(sk, &net_pfkey->table) {
		struct pfkey_sock *pfk = pfkey_sk(sk);
		int err2;

		/* Yes, it means that if you are meant to receive this
		 * pfkey message you receive it twice as promiscuous
		 * socket.
		 */
		if (pfk->promisc)
			pfkey_broadcast_one(skb, &skb2, allocation, sk);

		/* the exact target will be processed later */
		if (sk == one_sk)
			continue;
		if (broadcast_flags != BROADCAST_ALL) {
			if (broadcast_flags & BROADCAST_PROMISC_ONLY)
				continue;
			if ((broadcast_flags & BROADCAST_REGISTERED) &&
			    !pfk->registered)
				continue;
			if (broadcast_flags & BROADCAST_ONE)
				continue;
		}

		err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk);

		/* Error is cleared after successful sending to at least one
		 * registered KM */
		if ((broadcast_flags & BROADCAST_REGISTERED) && err)
			err = err2;
	}
	rcu_read_unlock();

	if (one_sk != NULL)
		err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);

	kfree_skb(skb2);
	kfree_skb(skb);
	return err;
}

/* Advance an in-progress dump by one step and flush the pending message
 * once the walk finishes (rc != -ENOBUFS). */
static int pfkey_do_dump(struct pfkey_sock *pfk)
{
	struct sadb_msg *hdr;
	int rc;

	rc = pfk->dump.dump(pfk);
	if (rc == -ENOBUFS)
		return 0;

	if (pfk->dump.skb) {
		if (!pfkey_can_dump(&pfk->sk))
			return 0;

		hdr = (struct sadb_msg *) pfk->dump.skb->data;
		hdr->sadb_msg_seq = 0;
		hdr->sadb_msg_errno = rc;
		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
				&pfk->sk, sock_net(&pfk->sk));
		pfk->dump.skb = NULL;
	}

	pfkey_terminate_dump(pfk);
	return rc;
}

static inline void pfkey_hdr_dup(struct sadb_msg *new,
				 const struct sadb_msg *orig)
{
	*new = *orig;
}

/* Echo @orig back to @sk as a bare sadb_msg carrying the (positive)
 * errno in sadb_msg_errno. */
static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
{
	struct sk_buff *skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_KERNEL);
	struct sadb_msg *hdr;

	if (!skb)
		return -ENOBUFS;

	/* Woe be to the platform trying to support PFKEY yet
	 * having normal errnos outside the 1-255 range, inclusive.
	 */
	err = -err;
	if (err == ERESTARTSYS ||
	    err == ERESTARTNOHAND ||
	    err == ERESTARTNOINTR)
		err = EINTR;
	if (err >= 512)
		err = EINVAL;
	BUG_ON(err <= 0 || err >= 256);

	hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
	pfkey_hdr_dup(hdr, orig);
	hdr->sadb_msg_errno = (uint8_t) err;
	hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
			     sizeof(uint64_t));

	pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));

	return 0;
}

/* Minimum byte size of each extension type, used by parse_exthdrs() to
 * reject undersized extensions. */
static const u8 sadb_ext_min_len[] = {
	[SADB_EXT_RESERVED]		= (u8) 0,
	[SADB_EXT_SA]			= (u8) sizeof(struct sadb_sa),
	[SADB_EXT_LIFETIME_CURRENT]	= (u8) sizeof(struct sadb_lifetime),
	[SADB_EXT_LIFETIME_HARD]	= (u8) sizeof(struct sadb_lifetime),
	[SADB_EXT_LIFETIME_SOFT]	= (u8) sizeof(struct sadb_lifetime),
	[SADB_EXT_ADDRESS_SRC]		= (u8) sizeof(struct sadb_address),
	[SADB_EXT_ADDRESS_DST]		= (u8) sizeof(struct sadb_address),
	[SADB_EXT_ADDRESS_PROXY]	= (u8) sizeof(struct sadb_address),
	[SADB_EXT_KEY_AUTH]		= (u8) sizeof(struct sadb_key),
	[SADB_EXT_KEY_ENCRYPT]		= (u8) sizeof(struct sadb_key),
	[SADB_EXT_IDENTITY_SRC]		= (u8) sizeof(struct sadb_ident),
	[SADB_EXT_IDENTITY_DST]		= (u8) sizeof(struct sadb_ident),
	[SADB_EXT_SENSITIVITY]		= (u8) sizeof(struct sadb_sens),
	[SADB_EXT_PROPOSAL]		= (u8) sizeof(struct sadb_prop),
	[SADB_EXT_SUPPORTED_AUTH]	= (u8) sizeof(struct sadb_supported),
	[SADB_EXT_SUPPORTED_ENCRYPT]	= (u8) sizeof(struct sadb_supported),
	[SADB_EXT_SPIRANGE]		= (u8) sizeof(struct sadb_spirange),
	[SADB_X_EXT_KMPRIVATE]		= (u8) sizeof(struct sadb_x_kmprivate),
	[SADB_X_EXT_POLICY]		= (u8) sizeof(struct sadb_x_policy),
	[SADB_X_EXT_SA2]		= (u8) sizeof(struct sadb_x_sa2),
	[SADB_X_EXT_NAT_T_TYPE]		= (u8) sizeof(struct sadb_x_nat_t_type),
	[SADB_X_EXT_NAT_T_SPORT]	= (u8) sizeof(struct sadb_x_nat_t_port),
	[SADB_X_EXT_NAT_T_DPORT]	= (u8) sizeof(struct sadb_x_nat_t_port),
	[SADB_X_EXT_NAT_T_OA]		= (u8) sizeof(struct sadb_address),
	[SADB_X_EXT_SEC_CTX]		= (u8) sizeof(struct sadb_x_sec_ctx),
	[SADB_X_EXT_KMADDRESS]		= (u8) sizeof(struct sadb_x_kmaddress),
};

/* Verify
sadb_address_{len,prefixlen} against sa_family. */ static int verify_address_len(const void *p) { const struct sadb_address *sp = p; const struct sockaddr *addr = (const struct sockaddr *)(sp + 1); const struct sockaddr_in *sin; #if IS_ENABLED(CONFIG_IPV6) const struct sockaddr_in6 *sin6; #endif int len; switch (addr->sa_family) { case AF_INET: len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t)); if (sp->sadb_address_len != len || sp->sadb_address_prefixlen > 32) return -EINVAL; break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin6), sizeof(uint64_t)); if (sp->sadb_address_len != len || sp->sadb_address_prefixlen > 128) return -EINVAL; break; #endif default: /* It is user using kernel to keep track of security * associations for another protocol, such as * OSPF/RSVP/RIPV2/MIP. It is user's job to verify * lengths. * * XXX Actually, association/policy database is not yet * XXX able to cope with arbitrary sockaddr families. * XXX When it can, remove this -EINVAL. 
-DaveM */ return -EINVAL; break; } return 0; } static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx) { return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) + sec_ctx->sadb_x_ctx_len, sizeof(uint64_t)); } static inline int verify_sec_ctx_len(const void *p) { const struct sadb_x_sec_ctx *sec_ctx = p; int len = sec_ctx->sadb_x_ctx_len; if (len > PAGE_SIZE) return -EINVAL; len = pfkey_sec_ctx_len(sec_ctx); if (sec_ctx->sadb_x_sec_len != len) return -EINVAL; return 0; } static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx) { struct xfrm_user_sec_ctx *uctx = NULL; int ctx_size = sec_ctx->sadb_x_ctx_len; uctx = kmalloc((sizeof(*uctx)+ctx_size), GFP_KERNEL); if (!uctx) return NULL; uctx->len = pfkey_sec_ctx_len(sec_ctx); uctx->exttype = sec_ctx->sadb_x_sec_exttype; uctx->ctx_doi = sec_ctx->sadb_x_ctx_doi; uctx->ctx_alg = sec_ctx->sadb_x_ctx_alg; uctx->ctx_len = sec_ctx->sadb_x_ctx_len; memcpy(uctx + 1, sec_ctx + 1, uctx->ctx_len); return uctx; } static int present_and_same_family(const struct sadb_address *src, const struct sadb_address *dst) { const struct sockaddr *s_addr, *d_addr; if (!src || !dst) return 0; s_addr = (const struct sockaddr *)(src + 1); d_addr = (const struct sockaddr *)(dst + 1); if (s_addr->sa_family != d_addr->sa_family) return 0; if (s_addr->sa_family != AF_INET #if IS_ENABLED(CONFIG_IPV6) && s_addr->sa_family != AF_INET6 #endif ) return 0; return 1; } static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void **ext_hdrs) { const char *p = (char *) hdr; int len = skb->len; len -= sizeof(*hdr); p += sizeof(*hdr); while (len > 0) { const struct sadb_ext *ehdr = (const struct sadb_ext *) p; uint16_t ext_type; int ext_len; ext_len = ehdr->sadb_ext_len; ext_len *= sizeof(uint64_t); ext_type = ehdr->sadb_ext_type; if (ext_len < sizeof(uint64_t) || ext_len > len || ext_type == SADB_EXT_RESERVED) return -EINVAL; if (ext_type <= SADB_EXT_MAX) { int min = (int) 
sadb_ext_min_len[ext_type]; if (ext_len < min) return -EINVAL; if (ext_hdrs[ext_type-1] != NULL) return -EINVAL; if (ext_type == SADB_EXT_ADDRESS_SRC || ext_type == SADB_EXT_ADDRESS_DST || ext_type == SADB_EXT_ADDRESS_PROXY || ext_type == SADB_X_EXT_NAT_T_OA) { if (verify_address_len(p)) return -EINVAL; } if (ext_type == SADB_X_EXT_SEC_CTX) { if (verify_sec_ctx_len(p)) return -EINVAL; } ext_hdrs[ext_type-1] = (void *) p; } p += ext_len; len -= ext_len; } return 0; } static uint16_t pfkey_satype2proto(uint8_t satype) { switch (satype) { case SADB_SATYPE_UNSPEC: return IPSEC_PROTO_ANY; case SADB_SATYPE_AH: return IPPROTO_AH; case SADB_SATYPE_ESP: return IPPROTO_ESP; case SADB_X_SATYPE_IPCOMP: return IPPROTO_COMP; break; default: return 0; } /* NOTREACHED */ } static uint8_t pfkey_proto2satype(uint16_t proto) { switch (proto) { case IPPROTO_AH: return SADB_SATYPE_AH; case IPPROTO_ESP: return SADB_SATYPE_ESP; case IPPROTO_COMP: return SADB_X_SATYPE_IPCOMP; break; default: return 0; } /* NOTREACHED */ } /* BTW, this scheme means that there is no way with PFKEY2 sockets to * say specifically 'just raw sockets' as we encode them as 255. */ static uint8_t pfkey_proto_to_xfrm(uint8_t proto) { return proto == IPSEC_PROTO_ANY ? 0 : proto; } static uint8_t pfkey_proto_from_xfrm(uint8_t proto) { return proto ? 
proto : IPSEC_PROTO_ANY;
}

/* Byte size of the sockaddr for a supported family; 0 = unsupported. */
static inline int pfkey_sockaddr_len(sa_family_t family)
{
	switch (family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
#endif
	}
	return 0;
}

/* Copy the address out of @sa into @xaddr; returns the family on
 * success, 0 for an unsupported family. */
static
int pfkey_sockaddr_extract(const struct sockaddr *sa, xfrm_address_t *xaddr)
{
	switch (sa->sa_family) {
	case AF_INET:
		xaddr->a4 =
			((struct sockaddr_in *)sa)->sin_addr.s_addr;
		return AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		memcpy(xaddr->a6,
		       &((struct sockaddr_in6 *)sa)->sin6_addr,
		       sizeof(struct in6_addr));
		return AF_INET6;
#endif
	}
	return 0;
}

static
int pfkey_sadb_addr2xfrm_addr(const struct sadb_address *addr,
			      xfrm_address_t *xaddr)
{
	/* The sockaddr immediately follows the sadb_address header. */
	return pfkey_sockaddr_extract((struct sockaddr *)(addr + 1),
				      xaddr);
}

/* Locate the xfrm_state referenced by the message's SA extension and
 * destination address; sadb_address_len should be checked by caller. */
static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net,
						  const struct sadb_msg *hdr,
						  void * const *ext_hdrs)
{
	const struct sadb_sa *sa;
	const struct sadb_address *addr;
	uint16_t proto;
	unsigned short family;
	xfrm_address_t *xaddr;

	sa = ext_hdrs[SADB_EXT_SA - 1];
	if (sa == NULL)
		return NULL;

	proto = pfkey_satype2proto(hdr->sadb_msg_satype);
	if (proto == 0)
		return NULL;

	/* sadb_address_len should be checked by caller */
	addr = ext_hdrs[SADB_EXT_ADDRESS_DST - 1];
	if (addr == NULL)
		return NULL;

	family = ((const struct sockaddr *)(addr + 1))->sa_family;
	switch (family) {
	case AF_INET:
		xaddr = (xfrm_address_t *)&((const struct sockaddr_in *)(addr + 1))->sin_addr;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		xaddr = (xfrm_address_t *)&((const struct sockaddr_in6 *)(addr + 1))->sin6_addr;
		break;
#endif
	default:
		xaddr = NULL;
	}

	if (!xaddr)
		return NULL;

	return xfrm_state_lookup(net, DUMMY_MARK, xaddr, sa->sadb_sa_spi,
				 proto, family);
}

/* Round @a up to the next multiple of 8 (PF_KEY 64-bit alignment). */
#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))

static int pfkey_sockaddr_size(sa_family_t family)
{
	return PFKEY_ALIGN8(pfkey_sockaddr_len(family));
}

/* XFRM mode -> IPSEC mode; -1 for an unrepresentable mode. */
static inline int pfkey_mode_from_xfrm(int mode)
{
	switch(mode) {
	case XFRM_MODE_TRANSPORT:
		return IPSEC_MODE_TRANSPORT;
	case XFRM_MODE_TUNNEL:
		return IPSEC_MODE_TUNNEL;
	case XFRM_MODE_BEET:
		return IPSEC_MODE_BEET;
	default:
		return -1;
	}
}

/* IPSEC mode -> XFRM mode; -1 for an unknown mode. */
static inline int pfkey_mode_to_xfrm(int mode)
{
	switch(mode) {
	case IPSEC_MODE_ANY: /*XXX*/
	case IPSEC_MODE_TRANSPORT:
		return XFRM_MODE_TRANSPORT;
	case IPSEC_MODE_TUNNEL:
		return XFRM_MODE_TUNNEL;
	case IPSEC_MODE_BEET:
		return XFRM_MODE_BEET;
	default:
		return -1;
	}
}

/* Fill @sa from @xaddr/@port; returns the address prefix length in bits
 * (32 or 128), or 0 for an unsupported family. */
static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port,
					struct sockaddr *sa,
					unsigned short family)
{
	switch (family) {
	case AF_INET:
	    {
		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
		sin->sin_family = AF_INET;
		sin->sin_port = port;
		sin->sin_addr.s_addr = xaddr->a4;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		return 32;
	    }
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
	    {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = port;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_addr = *(struct in6_addr *)xaddr->a6;
		sin6->sin6_scope_id = 0;
		return 128;
	    }
#endif
	}
	return 0;
}

/*
 * Serialize @x into a freshly allocated PF_KEY message skb.
 * @add_keys: include the (sensitive) auth/encrypt key material.
 * @hsc: bitmask -- bit 0 adds the soft lifetime, bit 1 the hard
 *       lifetime; the current lifetime is always included.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *__pfkey_xfrm_state2msg(const struct xfrm_state *x,
					      int add_keys, int hsc)
{
	struct sk_buff *skb;
	struct sadb_msg *hdr;
	struct sadb_sa *sa;
	struct sadb_lifetime *lifetime;
	struct sadb_address *addr;
	struct sadb_key *key;
	struct sadb_x_sa2 *sa2;
	struct sadb_x_sec_ctx *sec_ctx;
	struct xfrm_sec_ctx *xfrm_ctx;
	int ctx_size = 0;
	int size;
	int auth_key_size = 0;
	int encrypt_key_size = 0;
	int sockaddr_size;
	struct xfrm_encap_tmpl *natt = NULL;
	int mode;

	/* address family check */
	sockaddr_size = pfkey_sockaddr_size(x->props.family);
	if (!sockaddr_size)
		return ERR_PTR(-EINVAL);

	/* base, SA, (lifetime (HSC),) address(SD), (address(P),)
	   key(AE), (identity(SD),) (sensitivity)> */
	size = sizeof(struct sadb_msg) +sizeof(struct sadb_sa) +
		sizeof(struct sadb_lifetime) +
		((hsc & 1) ? sizeof(struct sadb_lifetime) : 0) +
		((hsc & 2) ?
sizeof(struct sadb_lifetime) : 0) +
		sizeof(struct sadb_address)*2 +
		sockaddr_size*2 +
		sizeof(struct sadb_x_sa2);

	if ((xfrm_ctx = x->security)) {
		ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
		size += sizeof(struct sadb_x_sec_ctx) + ctx_size;
	}

	/* identity & sensitivity */
	if (!xfrm_addr_equal(&x->sel.saddr, &x->props.saddr, x->props.family))
		size += sizeof(struct sadb_address) + sockaddr_size;

	if (add_keys) {
		if (x->aalg && x->aalg->alg_key_len) {
			auth_key_size = PFKEY_ALIGN8((x->aalg->alg_key_len + 7) / 8);
			size += sizeof(struct sadb_key) + auth_key_size;
		}
		if (x->ealg && x->ealg->alg_key_len) {
			encrypt_key_size = PFKEY_ALIGN8((x->ealg->alg_key_len+7) / 8);
			size += sizeof(struct sadb_key) + encrypt_key_size;
		}
	}
	if (x->encap)
		natt = x->encap;

	if (natt && natt->encap_type) {
		size += sizeof(struct sadb_x_nat_t_type);
		size += sizeof(struct sadb_x_nat_t_port);
		size += sizeof(struct sadb_x_nat_t_port);
	}

	skb =  alloc_skb(size + 16, GFP_ATOMIC);
	if (skb == NULL)
		return ERR_PTR(-ENOBUFS);

	/* call should fill header later */
	hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
	/* NOTE(review): this memset() clears the full @size bytes even
	 * though only the header has been reserved via skb_put() so far;
	 * it stays within the allocation but relies on the subsequent
	 * skb_put()s covering exactly @size -- the "XXX" is original. */
	memset(hdr, 0, size);	/* XXX do we need this ? */
	hdr->sadb_msg_len = size / sizeof(uint64_t);

	/* sa */
	sa = (struct sadb_sa *)  skb_put(skb, sizeof(struct sadb_sa));
	sa->sadb_sa_len = sizeof(struct sadb_sa)/sizeof(uint64_t);
	sa->sadb_sa_exttype = SADB_EXT_SA;
	sa->sadb_sa_spi = x->id.spi;
	sa->sadb_sa_replay = x->props.replay_window;
	switch (x->km.state) {
	case XFRM_STATE_VALID:
		sa->sadb_sa_state = x->km.dying ?
			SADB_SASTATE_DYING : SADB_SASTATE_MATURE;
		break;
	case XFRM_STATE_ACQ:
		sa->sadb_sa_state = SADB_SASTATE_LARVAL;
		break;
	default:
		sa->sadb_sa_state = SADB_SASTATE_DEAD;
		break;
	}
	/* Algorithm ids are only advertised when the algorithm is known
	 * to the PF_KEY layer (pfkey_supported). */
	sa->sadb_sa_auth = 0;
	if (x->aalg) {
		struct xfrm_algo_desc *a = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		sa->sadb_sa_auth = (a && a->pfkey_supported) ?
					a->desc.sadb_alg_id : 0;
	}
	sa->sadb_sa_encrypt = 0;
	BUG_ON(x->ealg && x->calg);
	if (x->ealg) {
		struct xfrm_algo_desc *a = xfrm_ealg_get_byname(x->ealg->alg_name, 0);
		sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
					a->desc.sadb_alg_id : 0;
	}
	/* KAME compatible: sadb_sa_encrypt is overloaded with calg id */
	if (x->calg) {
		struct xfrm_algo_desc *a = xfrm_calg_get_byname(x->calg->alg_name, 0);
		sa->sadb_sa_encrypt = (a && a->pfkey_supported) ?
					a->desc.sadb_alg_id : 0;
	}

	sa->sadb_sa_flags = 0;
	if (x->props.flags & XFRM_STATE_NOECN)
		sa->sadb_sa_flags |= SADB_SAFLAGS_NOECN;
	if (x->props.flags & XFRM_STATE_DECAP_DSCP)
		sa->sadb_sa_flags |= SADB_SAFLAGS_DECAP_DSCP;
	if (x->props.flags & XFRM_STATE_NOPMTUDISC)
		sa->sadb_sa_flags |= SADB_SAFLAGS_NOPMTUDISC;

	/* hard time */
	if (hsc & 2) {
		lifetime = (struct sadb_lifetime *)  skb_put(skb,
							     sizeof(struct sadb_lifetime));
		lifetime->sadb_lifetime_len =
			sizeof(struct sadb_lifetime)/sizeof(uint64_t);
		lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD;
		lifetime->sadb_lifetime_allocations =  _X2KEY(x->lft.hard_packet_limit);
		lifetime->sadb_lifetime_bytes = _X2KEY(x->lft.hard_byte_limit);
		lifetime->sadb_lifetime_addtime = x->lft.hard_add_expires_seconds;
		lifetime->sadb_lifetime_usetime = x->lft.hard_use_expires_seconds;
	}
	/* soft time */
	if (hsc & 1) {
		lifetime = (struct sadb_lifetime *)  skb_put(skb,
							     sizeof(struct sadb_lifetime));
		lifetime->sadb_lifetime_len =
			sizeof(struct sadb_lifetime)/sizeof(uint64_t);
		lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT;
		lifetime->sadb_lifetime_allocations =  _X2KEY(x->lft.soft_packet_limit);
		lifetime->sadb_lifetime_bytes = _X2KEY(x->lft.soft_byte_limit);
		lifetime->sadb_lifetime_addtime = x->lft.soft_add_expires_seconds;
		lifetime->sadb_lifetime_usetime = x->lft.soft_use_expires_seconds;
	}
	/* current time */
	lifetime = (struct sadb_lifetime *)  skb_put(skb,
						     sizeof(struct sadb_lifetime));
	lifetime->sadb_lifetime_len =
		sizeof(struct sadb_lifetime)/sizeof(uint64_t);
	lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT;
	lifetime->sadb_lifetime_allocations = x->curlft.packets;
	lifetime->sadb_lifetime_bytes = x->curlft.bytes;
	lifetime->sadb_lifetime_addtime = x->curlft.add_time;
	lifetime->sadb_lifetime_usetime = x->curlft.use_time;
	/* src address */
	addr = (struct sadb_address*) skb_put(skb,
					      sizeof(struct sadb_address)+sockaddr_size);
	addr->sadb_address_len =
		(sizeof(struct sadb_address)+sockaddr_size)/
			sizeof(uint64_t);
	addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
	/* "if the ports are non-zero, then the sadb_address_proto field,
	   normally zero, MUST be filled in with the transport
	   protocol's number." - RFC2367 */
	addr->sadb_address_proto = 0;
	addr->sadb_address_reserved = 0;

	addr->sadb_address_prefixlen = pfkey_sockaddr_fill(&x->props.saddr, 0,
							   (struct sockaddr *) (addr + 1),
							   x->props.family);
	if (!addr->sadb_address_prefixlen)
		BUG();

	/* dst address */
	addr = (struct sadb_address*) skb_put(skb,
					      sizeof(struct sadb_address)+sockaddr_size);
	addr->sadb_address_len =
		(sizeof(struct sadb_address)+sockaddr_size)/
			sizeof(uint64_t);
	addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
	addr->sadb_address_proto = 0;
	addr->sadb_address_reserved = 0;

	addr->sadb_address_prefixlen = pfkey_sockaddr_fill(&x->id.daddr, 0,
							   (struct sockaddr *) (addr + 1),
							   x->props.family);
	if (!addr->sadb_address_prefixlen)
		BUG();

	/* Proxy (selector source) address, only when it differs from the
	 * SA's own source address. */
	if (!xfrm_addr_equal(&x->sel.saddr, &x->props.saddr,
			     x->props.family)) {
		addr = (struct sadb_address*) skb_put(skb,
			sizeof(struct sadb_address)+sockaddr_size);
		addr->sadb_address_len =
			(sizeof(struct sadb_address)+sockaddr_size)/
			sizeof(uint64_t);
		addr->sadb_address_exttype = SADB_EXT_ADDRESS_PROXY;
		addr->sadb_address_proto =
			pfkey_proto_from_xfrm(x->sel.proto);
		addr->sadb_address_prefixlen = x->sel.prefixlen_s;
		addr->sadb_address_reserved = 0;

		pfkey_sockaddr_fill(&x->sel.saddr, x->sel.sport,
				    (struct sockaddr *) (addr + 1),
				    x->props.family);
	}

	/* auth key */
	if (add_keys && auth_key_size) {
		key = (struct sadb_key *) skb_put(skb,
						  sizeof(struct sadb_key)+auth_key_size);
		key->sadb_key_len = (sizeof(struct sadb_key) + auth_key_size) /
			sizeof(uint64_t);
		key->sadb_key_exttype = SADB_EXT_KEY_AUTH;
		key->sadb_key_bits = x->aalg->alg_key_len;
		key->sadb_key_reserved = 0;
		memcpy(key + 1, x->aalg->alg_key, (x->aalg->alg_key_len+7)/8);
	}
	/* encrypt key */
	if (add_keys && encrypt_key_size) {
		key = (struct sadb_key *) skb_put(skb,
						  sizeof(struct sadb_key)+encrypt_key_size);
		key->sadb_key_len = (sizeof(struct sadb_key) +
				     encrypt_key_size) / sizeof(uint64_t);
		key->sadb_key_exttype = SADB_EXT_KEY_ENCRYPT;
		key->sadb_key_bits = x->ealg->alg_key_len;
		key->sadb_key_reserved = 0;
		memcpy(key + 1, x->ealg->alg_key,
		       (x->ealg->alg_key_len+7)/8);
	}

	/* sa */
	sa2 = (struct sadb_x_sa2 *)  skb_put(skb, sizeof(struct sadb_x_sa2));
	sa2->sadb_x_sa2_len = sizeof(struct sadb_x_sa2)/sizeof(uint64_t);
	sa2->sadb_x_sa2_exttype = SADB_X_EXT_SA2;
	if ((mode = pfkey_mode_from_xfrm(x->props.mode)) < 0) {
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}
	sa2->sadb_x_sa2_mode = mode;
	sa2->sadb_x_sa2_reserved1 = 0;
	sa2->sadb_x_sa2_reserved2 = 0;
	sa2->sadb_x_sa2_sequence = 0;
	sa2->sadb_x_sa2_reqid = x->props.reqid;

	/* NAT traversal: type plus source/destination UDP ports. */
	if (natt && natt->encap_type) {
		struct sadb_x_nat_t_type *n_type;
		struct sadb_x_nat_t_port *n_port;

		/* type */
		n_type = (struct sadb_x_nat_t_type*) skb_put(skb, sizeof(*n_type));
		n_type->sadb_x_nat_t_type_len = sizeof(*n_type)/sizeof(uint64_t);
		n_type->sadb_x_nat_t_type_exttype = SADB_X_EXT_NAT_T_TYPE;
		n_type->sadb_x_nat_t_type_type = natt->encap_type;
		n_type->sadb_x_nat_t_type_reserved[0] = 0;
		n_type->sadb_x_nat_t_type_reserved[1] = 0;
		n_type->sadb_x_nat_t_type_reserved[2] = 0;

		/* source port */
		n_port = (struct sadb_x_nat_t_port*) skb_put(skb, sizeof (*n_port));
		n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
		n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_SPORT;
		n_port->sadb_x_nat_t_port_port = natt->encap_sport;
		n_port->sadb_x_nat_t_port_reserved = 0;

		/* dest port */
		n_port = (struct sadb_x_nat_t_port*) skb_put(skb, sizeof (*n_port));
		n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
		n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_DPORT;
		n_port->sadb_x_nat_t_port_port = natt->encap_dport;
		n_port->sadb_x_nat_t_port_reserved = 0;
	}

	/* security context */
	if (xfrm_ctx) {
		sec_ctx = (struct sadb_x_sec_ctx *) skb_put(skb,
				sizeof(struct sadb_x_sec_ctx) + ctx_size);
		sec_ctx->sadb_x_sec_len =
		  (sizeof(struct sadb_x_sec_ctx) + ctx_size) / sizeof(uint64_t);
		sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX;
		sec_ctx->sadb_x_ctx_doi = xfrm_ctx->ctx_doi;
		sec_ctx->sadb_x_ctx_alg = xfrm_ctx->ctx_alg;
		sec_ctx->sadb_x_ctx_len = xfrm_ctx->ctx_len;
		memcpy(sec_ctx + 1, xfrm_ctx->ctx_str,
		       xfrm_ctx->ctx_len);
	}

	return skb;
}

/* Full dump variant: include keys and both soft+hard lifetimes. */
static inline struct sk_buff *pfkey_xfrm_state2msg(const struct xfrm_state *x)
{
	struct sk_buff *skb;

	skb = __pfkey_xfrm_state2msg(x, 1, 3);

	return skb;
}

/* Expire-notification variant: no keys; @hsc selects soft/hard. */
static inline struct sk_buff *pfkey_xfrm_state2msg_expire(const struct xfrm_state *x,
							  int hsc)
{
	return __pfkey_xfrm_state2msg(x, 0, hsc);
}

/*
 * Build an xfrm_state from a userspace SADB_ADD/SADB_UPDATE message.
 * NOTE(review): this function is truncated at the end of this chunk (it
 * continues past the visible source).
 */
static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
						const struct sadb_msg *hdr,
						void * const *ext_hdrs)
{
	struct xfrm_state *x;
	const struct sadb_lifetime *lifetime;
	const struct sadb_sa *sa;
	const struct sadb_key *key;
	const struct sadb_x_sec_ctx *sec_ctx;
	uint16_t proto;
	int err;

	sa = ext_hdrs[SADB_EXT_SA - 1];
	if (!sa ||
	    !present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
				     ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
		return ERR_PTR(-EINVAL);
	if (hdr->sadb_msg_satype == SADB_SATYPE_ESP &&
	    !ext_hdrs[SADB_EXT_KEY_ENCRYPT-1])
		return ERR_PTR(-EINVAL);
	if (hdr->sadb_msg_satype == SADB_SATYPE_AH &&
	    !ext_hdrs[SADB_EXT_KEY_AUTH-1])
		return ERR_PTR(-EINVAL);
	if (!!ext_hdrs[SADB_EXT_LIFETIME_HARD-1] !=
	    !!ext_hdrs[SADB_EXT_LIFETIME_SOFT-1])
		return ERR_PTR(-EINVAL);

	proto = pfkey_satype2proto(hdr->sadb_msg_satype);
	if (proto == 0)
		return ERR_PTR(-EINVAL);

	/* default error is no buffer space */
	err = -ENOBUFS;

	/* RFC2367:

   Only SADB_SASTATE_MATURE SAs may be submitted in an SADB_ADD message.
   SADB_SASTATE_LARVAL SAs are created by SADB_GETSPI and it is not
   sensible to add a new SA in the DYING or SADB_SASTATE_DEAD state.
   Therefore, the sadb_sa_state field of all submitted SAs MUST be
   SADB_SASTATE_MATURE and the kernel MUST return an error if this is
   not true.

	   However, KAME setkey always uses SADB_SASTATE_LARVAL.
	   Hence, we have to _ignore_ sadb_sa_state, which is also reasonable.
	*/
	if (sa->sadb_sa_auth > SADB_AALG_MAX ||
	    (hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP &&
	     sa->sadb_sa_encrypt > SADB_X_CALG_MAX) ||
	    sa->sadb_sa_encrypt > SADB_EALG_MAX)
		return ERR_PTR(-EINVAL);
	/* NOTE(review): this bound compares against the whole extension
	 * length (sadb_key_len * 8) without subtracting the sadb_key
	 * header, so sadb_key_bits can claim up to 8 bytes more key
	 * material than the extension actually carries; the later memcpy
	 * of (bits+7)/8 bytes could then overread -- compare with the
	 * current upstream check. */
	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
	if (key != NULL &&
	    sa->sadb_sa_auth != SADB_X_AALG_NULL &&
	    ((key->sadb_key_bits+7) / 8 == 0 ||
	     (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
		return ERR_PTR(-EINVAL);
	key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
	if (key != NULL &&
	    sa->sadb_sa_encrypt != SADB_EALG_NULL &&
	    ((key->sadb_key_bits+7) / 8 == 0 ||
	     (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
		return ERR_PTR(-EINVAL);

	x = xfrm_state_alloc(net);
	if (x == NULL)
		return ERR_PTR(-ENOBUFS);

	x->id.proto = proto;
	x->id.spi = sa->sadb_sa_spi;
	/* Clamp the replay window to what the bitmap can track. */
	x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
					(sizeof(x->replay.bitmap) * 8));
	if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
		x->props.flags |= XFRM_STATE_NOECN;
	if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
		x->props.flags |= XFRM_STATE_DECAP_DSCP;
	if (sa->sadb_sa_flags & SADB_SAFLAGS_NOPMTUDISC)
		x->props.flags |= XFRM_STATE_NOPMTUDISC;

	lifetime = ext_hdrs[SADB_EXT_LIFETIME_HARD - 1];
	if (lifetime != NULL) {
		x->lft.hard_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
		x->lft.hard_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
		x->lft.hard_add_expires_seconds = lifetime->sadb_lifetime_addtime;
		x->lft.hard_use_expires_seconds = lifetime->sadb_lifetime_usetime;
	}
	lifetime = ext_hdrs[SADB_EXT_LIFETIME_SOFT - 1];
	if (lifetime != NULL) {
		x->lft.soft_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
		x->lft.soft_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
		x->lft.soft_add_expires_seconds = lifetime->sadb_lifetime_addtime;
		x->lft.soft_use_expires_seconds = lifetime->sadb_lifetime_usetime;
	}

	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
	if (sec_ctx != NULL) {
		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);

		if (!uctx)
			goto out;

		err = security_xfrm_state_alloc(x, uctx);
		kfree(uctx);

		if (err)
			goto out;
	}

	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
	if (sa->sadb_sa_auth) {
		int keysize = 0;
		struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
		if (!a || !a->pfkey_supported) {
			err = -ENOSYS;
			goto out;
		}
		if (key)
			keysize = (key->sadb_key_bits + 7) / 8;
		x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
		if (!x->aalg)
			goto out;
		strcpy(x->aalg->alg_name, a->name);
		x->aalg->alg_key_len = 0;
		if (key) {
			x->aalg->alg_key_len = key->sadb_key_bits;
			memcpy(x->aalg->alg_key, key+1, keysize);
		}
		x->aalg->alg_trunc_len = a->uinfo.auth.icv_truncbits;
		x->props.aalgo = sa->sadb_sa_auth;
		/* x->algo.flags = sa->sadb_sa_flags; */
	}
	if (sa->sadb_sa_encrypt) {
		if (hdr->sadb_msg_satype == SADB_X_SATYPE_IPCOMP) {
			struct xfrm_algo_desc *a = xfrm_calg_get_byid(sa->sadb_sa_encrypt);
			if (!a || !a->pfkey_supported) {
				err = -ENOSYS;
				goto out;
			}
			x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
			if (!x->calg)
				goto out;
			strcpy(x->calg->alg_name, a->name);
			x->props.calgo = sa->sadb_sa_encrypt;
		} else {
			int keysize = 0;
			struct xfrm_algo_desc *a = xfrm_ealg_get_byid(sa->sadb_sa_encrypt);
			if (!a || !a->pfkey_supported) {
				err = -ENOSYS;
				goto out;
			}
			key = (struct sadb_key*) ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
			if (key)
				keysize = (key->sadb_key_bits + 7) / 8;
			x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
			if (!x->ealg)
				goto out;
			strcpy(x->ealg->alg_name, a->name);
			x->ealg->alg_key_len = 0;
			if (key) {
				x->ealg->alg_key_len = key->sadb_key_bits;
				memcpy(x->ealg->alg_key, key+1, keysize);
			}
			x->props.ealgo =
sa->sadb_sa_encrypt; } } /* x->algo.flags = sa->sadb_sa_flags; */ x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1], &x->props.saddr); pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1], &x->id.daddr); if (ext_hdrs[SADB_X_EXT_SA2-1]) { const struct sadb_x_sa2 *sa2 = ext_hdrs[SADB_X_EXT_SA2-1]; int mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode); if (mode < 0) { err = -EINVAL; goto out; } x->props.mode = mode; x->props.reqid = sa2->sadb_x_sa2_reqid; } if (ext_hdrs[SADB_EXT_ADDRESS_PROXY-1]) { const struct sadb_address *addr = ext_hdrs[SADB_EXT_ADDRESS_PROXY-1]; /* Nobody uses this, but we try. */ x->sel.family = pfkey_sadb_addr2xfrm_addr(addr, &x->sel.saddr); x->sel.prefixlen_s = addr->sadb_address_prefixlen; } if (!x->sel.family) x->sel.family = x->props.family; if (ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]) { const struct sadb_x_nat_t_type* n_type; struct xfrm_encap_tmpl *natt; x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL); if (!x->encap) goto out; natt = x->encap; n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]; natt->encap_type = n_type->sadb_x_nat_t_type_type; if (ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1]) { const struct sadb_x_nat_t_port *n_port = ext_hdrs[SADB_X_EXT_NAT_T_SPORT-1]; natt->encap_sport = n_port->sadb_x_nat_t_port_port; } if (ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1]) { const struct sadb_x_nat_t_port *n_port = ext_hdrs[SADB_X_EXT_NAT_T_DPORT-1]; natt->encap_dport = n_port->sadb_x_nat_t_port_port; } memset(&natt->encap_oa, 0, sizeof(natt->encap_oa)); } err = xfrm_init_state(x); if (err) goto out; x->km.seq = hdr->sadb_msg_seq; return x; out: x->km.state = XFRM_STATE_DEAD; xfrm_state_put(x); return ERR_PTR(err); } static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { return -EOPNOTSUPP; } static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct net *net = 
sock_net(sk); struct sk_buff *resp_skb; struct sadb_x_sa2 *sa2; struct sadb_address *saddr, *daddr; struct sadb_msg *out_hdr; struct sadb_spirange *range; struct xfrm_state *x = NULL; int mode; int err; u32 min_spi, max_spi; u32 reqid; u8 proto; unsigned short family; xfrm_address_t *xsaddr = NULL, *xdaddr = NULL; if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1], ext_hdrs[SADB_EXT_ADDRESS_DST-1])) return -EINVAL; proto = pfkey_satype2proto(hdr->sadb_msg_satype); if (proto == 0) return -EINVAL; if ((sa2 = ext_hdrs[SADB_X_EXT_SA2-1]) != NULL) { mode = pfkey_mode_to_xfrm(sa2->sadb_x_sa2_mode); if (mode < 0) return -EINVAL; reqid = sa2->sadb_x_sa2_reqid; } else { mode = 0; reqid = 0; } saddr = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; daddr = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; family = ((struct sockaddr *)(saddr + 1))->sa_family; switch (family) { case AF_INET: xdaddr = (xfrm_address_t *)&((struct sockaddr_in *)(daddr + 1))->sin_addr.s_addr; xsaddr = (xfrm_address_t *)&((struct sockaddr_in *)(saddr + 1))->sin_addr.s_addr; break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: xdaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(daddr + 1))->sin6_addr; xsaddr = (xfrm_address_t *)&((struct sockaddr_in6 *)(saddr + 1))->sin6_addr; break; #endif } if (hdr->sadb_msg_seq) { x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq); if (x && !xfrm_addr_equal(&x->id.daddr, xdaddr, family)) { xfrm_state_put(x); x = NULL; } } if (!x) x = xfrm_find_acq(net, &dummy_mark, mode, reqid, proto, xdaddr, xsaddr, 1, family); if (x == NULL) return -ENOENT; min_spi = 0x100; max_spi = 0x0fffffff; range = ext_hdrs[SADB_EXT_SPIRANGE-1]; if (range) { min_spi = range->sadb_spirange_min; max_spi = range->sadb_spirange_max; } err = xfrm_alloc_spi(x, min_spi, max_spi); resp_skb = err ? 
ERR_PTR(err) : pfkey_xfrm_state2msg(x); if (IS_ERR(resp_skb)) { xfrm_state_put(x); return PTR_ERR(resp_skb); } out_hdr = (struct sadb_msg *) resp_skb->data; out_hdr->sadb_msg_version = hdr->sadb_msg_version; out_hdr->sadb_msg_type = SADB_GETSPI; out_hdr->sadb_msg_satype = pfkey_proto2satype(proto); out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_reserved = 0; out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; xfrm_state_put(x); pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net); return 0; } static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct net *net = sock_net(sk); struct xfrm_state *x; if (hdr->sadb_msg_len != sizeof(struct sadb_msg)/8) return -EOPNOTSUPP; if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0) return 0; x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq); if (x == NULL) return 0; spin_lock_bh(&x->lock); if (x->km.state == XFRM_STATE_ACQ) { x->km.state = XFRM_STATE_ERROR; wake_up(&net->xfrm.km_waitq); } spin_unlock_bh(&x->lock); xfrm_state_put(x); return 0; } static inline int event2poltype(int event) { switch (event) { case XFRM_MSG_DELPOLICY: return SADB_X_SPDDELETE; case XFRM_MSG_NEWPOLICY: return SADB_X_SPDADD; case XFRM_MSG_UPDPOLICY: return SADB_X_SPDUPDATE; case XFRM_MSG_POLEXPIRE: // return SADB_X_SPDEXPIRE; default: pr_err("pfkey: Unknown policy event %d\n", event); break; } return 0; } static inline int event2keytype(int event) { switch (event) { case XFRM_MSG_DELSA: return SADB_DELETE; case XFRM_MSG_NEWSA: return SADB_ADD; case XFRM_MSG_UPDSA: return SADB_UPDATE; case XFRM_MSG_EXPIRE: return SADB_EXPIRE; default: pr_err("pfkey: Unknown SA event %d\n", event); break; } return 0; } /* ADD/UPD/DEL */ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c) { struct sk_buff *skb; struct sadb_msg *hdr; skb = pfkey_xfrm_state2msg(x); if (IS_ERR(skb)) return PTR_ERR(skb); hdr = (struct sadb_msg *) 
skb->data; hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_type = event2keytype(c->event); hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto); hdr->sadb_msg_errno = 0; hdr->sadb_msg_reserved = 0; hdr->sadb_msg_seq = c->seq; hdr->sadb_msg_pid = c->portid; pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x)); return 0; } static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct net *net = sock_net(sk); struct xfrm_state *x; int err; struct km_event c; x = pfkey_msg2xfrm_state(net, hdr, ext_hdrs); if (IS_ERR(x)) return PTR_ERR(x); xfrm_state_hold(x); if (hdr->sadb_msg_type == SADB_ADD) err = xfrm_state_add(x); else err = xfrm_state_update(x); xfrm_audit_state_add(x, err ? 0 : 1, audit_get_loginuid(current), audit_get_sessionid(current), 0); if (err < 0) { x->km.state = XFRM_STATE_DEAD; __xfrm_state_put(x); goto out; } if (hdr->sadb_msg_type == SADB_ADD) c.event = XFRM_MSG_NEWSA; else c.event = XFRM_MSG_UPDSA; c.seq = hdr->sadb_msg_seq; c.portid = hdr->sadb_msg_pid; km_state_notify(x, &c); out: xfrm_state_put(x); return err; } static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct net *net = sock_net(sk); struct xfrm_state *x; struct km_event c; int err; if (!ext_hdrs[SADB_EXT_SA-1] || !present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1], ext_hdrs[SADB_EXT_ADDRESS_DST-1])) return -EINVAL; x = pfkey_xfrm_state_lookup(net, hdr, ext_hdrs); if (x == NULL) return -ESRCH; if ((err = security_xfrm_state_delete(x))) goto out; if (xfrm_state_kern(x)) { err = -EPERM; goto out; } err = xfrm_state_delete(x); if (err < 0) goto out; c.seq = hdr->sadb_msg_seq; c.portid = hdr->sadb_msg_pid; c.event = XFRM_MSG_DELSA; km_state_notify(x, &c); out: xfrm_audit_state_delete(x, err ? 
0 : 1, audit_get_loginuid(current), audit_get_sessionid(current), 0); xfrm_state_put(x); return err; } static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct net *net = sock_net(sk); __u8 proto; struct sk_buff *out_skb; struct sadb_msg *out_hdr; struct xfrm_state *x; if (!ext_hdrs[SADB_EXT_SA-1] || !present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1], ext_hdrs[SADB_EXT_ADDRESS_DST-1])) return -EINVAL; x = pfkey_xfrm_state_lookup(net, hdr, ext_hdrs); if (x == NULL) return -ESRCH; out_skb = pfkey_xfrm_state2msg(x); proto = x->id.proto; xfrm_state_put(x); if (IS_ERR(out_skb)) return PTR_ERR(out_skb); out_hdr = (struct sadb_msg *) out_skb->data; out_hdr->sadb_msg_version = hdr->sadb_msg_version; out_hdr->sadb_msg_type = SADB_GET; out_hdr->sadb_msg_satype = pfkey_proto2satype(proto); out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_reserved = 0; out_hdr->sadb_msg_seq = hdr->sadb_msg_seq; out_hdr->sadb_msg_pid = hdr->sadb_msg_pid; pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); return 0; } static struct sk_buff *compose_sadb_supported(const struct sadb_msg *orig, gfp_t allocation) { struct sk_buff *skb; struct sadb_msg *hdr; int len, auth_len, enc_len, i; auth_len = xfrm_count_pfkey_auth_supported(); if (auth_len) { auth_len *= sizeof(struct sadb_alg); auth_len += sizeof(struct sadb_supported); } enc_len = xfrm_count_pfkey_enc_supported(); if (enc_len) { enc_len *= sizeof(struct sadb_alg); enc_len += sizeof(struct sadb_supported); } len = enc_len + auth_len + sizeof(struct sadb_msg); skb = alloc_skb(len + 16, allocation); if (!skb) goto out_put_algs; hdr = (struct sadb_msg *) skb_put(skb, sizeof(*hdr)); pfkey_hdr_dup(hdr, orig); hdr->sadb_msg_errno = 0; hdr->sadb_msg_len = len / sizeof(uint64_t); if (auth_len) { struct sadb_supported *sp; struct sadb_alg *ap; sp = (struct sadb_supported *) skb_put(skb, auth_len); ap = (struct sadb_alg *) (sp + 1); sp->sadb_supported_len = auth_len 
/ sizeof(uint64_t); sp->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH; for (i = 0; ; i++) { struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i); if (!aalg) break; if (!aalg->pfkey_supported) continue; if (aalg->available) *ap++ = aalg->desc; } } if (enc_len) { struct sadb_supported *sp; struct sadb_alg *ap; sp = (struct sadb_supported *) skb_put(skb, enc_len); ap = (struct sadb_alg *) (sp + 1); sp->sadb_supported_len = enc_len / sizeof(uint64_t); sp->sadb_supported_exttype = SADB_EXT_SUPPORTED_ENCRYPT; for (i = 0; ; i++) { struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i); if (!ealg) break; if (!ealg->pfkey_supported) continue; if (ealg->available) *ap++ = ealg->desc; } } out_put_algs: return skb; } static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct pfkey_sock *pfk = pfkey_sk(sk); struct sk_buff *supp_skb; if (hdr->sadb_msg_satype > SADB_SATYPE_MAX) return -EINVAL; if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC) { if (pfk->registered&(1<<hdr->sadb_msg_satype)) return -EEXIST; pfk->registered |= (1<<hdr->sadb_msg_satype); } xfrm_probe_algs(); supp_skb = compose_sadb_supported(hdr, GFP_KERNEL); if (!supp_skb) { if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC) pfk->registered &= ~(1<<hdr->sadb_msg_satype); return -ENOBUFS; } pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, sock_net(sk)); return 0; } static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr) { struct sk_buff *skb; struct sadb_msg *hdr; skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC); if (!skb) return -ENOBUFS; hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); memcpy(hdr, ihdr, sizeof(struct sadb_msg)); hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk)); } static int key_notify_sa_flush(const struct km_event *c) { struct sk_buff *skb; struct 
sadb_msg *hdr; skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC); if (!skb) return -ENOBUFS; hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); hdr->sadb_msg_satype = pfkey_proto2satype(c->data.proto); hdr->sadb_msg_type = SADB_FLUSH; hdr->sadb_msg_seq = c->seq; hdr->sadb_msg_pid = c->portid; hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); hdr->sadb_msg_reserved = 0; pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; } static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct net *net = sock_net(sk); unsigned int proto; struct km_event c; struct xfrm_audit audit_info; int err, err2; proto = pfkey_satype2proto(hdr->sadb_msg_satype); if (proto == 0) return -EINVAL; audit_info.loginuid = audit_get_loginuid(current); audit_info.sessionid = audit_get_sessionid(current); audit_info.secid = 0; err = xfrm_state_flush(net, proto, &audit_info); err2 = unicast_flush_resp(sk, hdr); if (err || err2) { if (err == -ESRCH) /* empty table - go quietly */ err = 0; return err ? 
err : err2; } c.data.proto = proto; c.seq = hdr->sadb_msg_seq; c.portid = hdr->sadb_msg_pid; c.event = XFRM_MSG_FLUSHSA; c.net = net; km_state_notify(NULL, &c); return 0; } static int dump_sa(struct xfrm_state *x, int count, void *ptr) { struct pfkey_sock *pfk = ptr; struct sk_buff *out_skb; struct sadb_msg *out_hdr; if (!pfkey_can_dump(&pfk->sk)) return -ENOBUFS; out_skb = pfkey_xfrm_state2msg(x); if (IS_ERR(out_skb)) return PTR_ERR(out_skb); out_hdr = (struct sadb_msg *) out_skb->data; out_hdr->sadb_msg_version = pfk->dump.msg_version; out_hdr->sadb_msg_type = SADB_DUMP; out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto); out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_reserved = 0; out_hdr->sadb_msg_seq = count + 1; out_hdr->sadb_msg_pid = pfk->dump.msg_portid; if (pfk->dump.skb) pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE, &pfk->sk, sock_net(&pfk->sk)); pfk->dump.skb = out_skb; return 0; } static int pfkey_dump_sa(struct pfkey_sock *pfk) { struct net *net = sock_net(&pfk->sk); return xfrm_state_walk(net, &pfk->dump.u.state, dump_sa, (void *) pfk); } static void pfkey_dump_sa_done(struct pfkey_sock *pfk) { xfrm_state_walk_done(&pfk->dump.u.state); } static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { u8 proto; struct pfkey_sock *pfk = pfkey_sk(sk); if (pfk->dump.dump != NULL) return -EBUSY; proto = pfkey_satype2proto(hdr->sadb_msg_satype); if (proto == 0) return -EINVAL; pfk->dump.msg_version = hdr->sadb_msg_version; pfk->dump.msg_portid = hdr->sadb_msg_pid; pfk->dump.dump = pfkey_dump_sa; pfk->dump.done = pfkey_dump_sa_done; xfrm_state_walk_init(&pfk->dump.u.state, proto); return pfkey_do_dump(pfk); } static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct pfkey_sock *pfk = pfkey_sk(sk); int satype = hdr->sadb_msg_satype; bool reset_errno = false; if (hdr->sadb_msg_len == (sizeof(*hdr) / sizeof(uint64_t))) { 
reset_errno = true; if (satype != 0 && satype != 1) return -EINVAL; pfk->promisc = satype; } if (reset_errno && skb_cloned(skb)) skb = skb_copy(skb, GFP_KERNEL); else skb = skb_clone(skb, GFP_KERNEL); if (reset_errno && skb) { struct sadb_msg *new_hdr = (struct sadb_msg *) skb->data; new_hdr->sadb_msg_errno = 0; } pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk)); return 0; } static int check_reqid(struct xfrm_policy *xp, int dir, int count, void *ptr) { int i; u32 reqid = *(u32*)ptr; for (i=0; i<xp->xfrm_nr; i++) { if (xp->xfrm_vec[i].reqid == reqid) return -EEXIST; } return 0; } static u32 gen_reqid(struct net *net) { struct xfrm_policy_walk walk; u32 start; int rc; static u32 reqid = IPSEC_MANUAL_REQID_MAX; start = reqid; do { ++reqid; if (reqid == 0) reqid = IPSEC_MANUAL_REQID_MAX+1; xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN); rc = xfrm_policy_walk(net, &walk, check_reqid, (void*)&reqid); xfrm_policy_walk_done(&walk); if (rc != -EEXIST) return reqid; } while (reqid != start); return 0; } static int parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) { struct net *net = xp_net(xp); struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr; int mode; if (xp->xfrm_nr >= XFRM_MAX_DEPTH) return -ELOOP; if (rq->sadb_x_ipsecrequest_mode == 0) return -EINVAL; t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */ if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0) return -EINVAL; t->mode = mode; if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) t->optional = 1; else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) { t->reqid = rq->sadb_x_ipsecrequest_reqid; if (t->reqid > IPSEC_MANUAL_REQID_MAX) t->reqid = 0; if (!t->reqid && !(t->reqid = gen_reqid(net))) return -ENOBUFS; } /* addresses present only in tunnel mode */ if (t->mode == XFRM_MODE_TUNNEL) { u8 *sa = (u8 *) (rq + 1); int family, socklen; family = pfkey_sockaddr_extract((struct sockaddr *)sa, &t->saddr); if (!family) return 
-EINVAL; socklen = pfkey_sockaddr_len(family); if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen), &t->id.daddr) != family) return -EINVAL; t->encap_family = family; } else t->encap_family = xp->family; /* No way to set this via kame pfkey */ t->allalgs = 1; xp->xfrm_nr++; return 0; } static int parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol) { int err; int len = pol->sadb_x_policy_len*8 - sizeof(struct sadb_x_policy); struct sadb_x_ipsecrequest *rq = (void*)(pol+1); if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy)) return -EINVAL; while (len >= sizeof(struct sadb_x_ipsecrequest)) { if ((err = parse_ipsecrequest(xp, rq)) < 0) return err; len -= rq->sadb_x_ipsecrequest_len; rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len); } return 0; } static inline int pfkey_xfrm_policy2sec_ctx_size(const struct xfrm_policy *xp) { struct xfrm_sec_ctx *xfrm_ctx = xp->security; if (xfrm_ctx) { int len = sizeof(struct sadb_x_sec_ctx); len += xfrm_ctx->ctx_len; return PFKEY_ALIGN8(len); } return 0; } static int pfkey_xfrm_policy2msg_size(const struct xfrm_policy *xp) { const struct xfrm_tmpl *t; int sockaddr_size = pfkey_sockaddr_size(xp->family); int socklen = 0; int i; for (i=0; i<xp->xfrm_nr; i++) { t = xp->xfrm_vec + i; socklen += pfkey_sockaddr_len(t->encap_family); } return sizeof(struct sadb_msg) + (sizeof(struct sadb_lifetime) * 3) + (sizeof(struct sadb_address) * 2) + (sockaddr_size * 2) + sizeof(struct sadb_x_policy) + (xp->xfrm_nr * sizeof(struct sadb_x_ipsecrequest)) + (socklen * 2) + pfkey_xfrm_policy2sec_ctx_size(xp); } static struct sk_buff * pfkey_xfrm_policy2msg_prep(const struct xfrm_policy *xp) { struct sk_buff *skb; int size; size = pfkey_xfrm_policy2msg_size(xp); skb = alloc_skb(size + 16, GFP_ATOMIC); if (skb == NULL) return ERR_PTR(-ENOBUFS); return skb; } static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *xp, int dir) { struct sadb_msg *hdr; struct sadb_address *addr; struct 
sadb_lifetime *lifetime; struct sadb_x_policy *pol; struct sadb_x_sec_ctx *sec_ctx; struct xfrm_sec_ctx *xfrm_ctx; int i; int size; int sockaddr_size = pfkey_sockaddr_size(xp->family); int socklen = pfkey_sockaddr_len(xp->family); size = pfkey_xfrm_policy2msg_size(xp); /* call should fill header later */ hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg)); memset(hdr, 0, size); /* XXX do we need this ? */ /* src address */ addr = (struct sadb_address*) skb_put(skb, sizeof(struct sadb_address)+sockaddr_size); addr->sadb_address_len = (sizeof(struct sadb_address)+sockaddr_size)/ sizeof(uint64_t); addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC; addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); addr->sadb_address_prefixlen = xp->selector.prefixlen_s; addr->sadb_address_reserved = 0; if (!pfkey_sockaddr_fill(&xp->selector.saddr, xp->selector.sport, (struct sockaddr *) (addr + 1), xp->family)) BUG(); /* dst address */ addr = (struct sadb_address*) skb_put(skb, sizeof(struct sadb_address)+sockaddr_size); addr->sadb_address_len = (sizeof(struct sadb_address)+sockaddr_size)/ sizeof(uint64_t); addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST; addr->sadb_address_proto = pfkey_proto_from_xfrm(xp->selector.proto); addr->sadb_address_prefixlen = xp->selector.prefixlen_d; addr->sadb_address_reserved = 0; pfkey_sockaddr_fill(&xp->selector.daddr, xp->selector.dport, (struct sockaddr *) (addr + 1), xp->family); /* hard time */ lifetime = (struct sadb_lifetime *) skb_put(skb, sizeof(struct sadb_lifetime)); lifetime->sadb_lifetime_len = sizeof(struct sadb_lifetime)/sizeof(uint64_t); lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_HARD; lifetime->sadb_lifetime_allocations = _X2KEY(xp->lft.hard_packet_limit); lifetime->sadb_lifetime_bytes = _X2KEY(xp->lft.hard_byte_limit); lifetime->sadb_lifetime_addtime = xp->lft.hard_add_expires_seconds; lifetime->sadb_lifetime_usetime = xp->lft.hard_use_expires_seconds; /* soft time */ lifetime = (struct 
sadb_lifetime *) skb_put(skb, sizeof(struct sadb_lifetime)); lifetime->sadb_lifetime_len = sizeof(struct sadb_lifetime)/sizeof(uint64_t); lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_SOFT; lifetime->sadb_lifetime_allocations = _X2KEY(xp->lft.soft_packet_limit); lifetime->sadb_lifetime_bytes = _X2KEY(xp->lft.soft_byte_limit); lifetime->sadb_lifetime_addtime = xp->lft.soft_add_expires_seconds; lifetime->sadb_lifetime_usetime = xp->lft.soft_use_expires_seconds; /* current time */ lifetime = (struct sadb_lifetime *) skb_put(skb, sizeof(struct sadb_lifetime)); lifetime->sadb_lifetime_len = sizeof(struct sadb_lifetime)/sizeof(uint64_t); lifetime->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; lifetime->sadb_lifetime_allocations = xp->curlft.packets; lifetime->sadb_lifetime_bytes = xp->curlft.bytes; lifetime->sadb_lifetime_addtime = xp->curlft.add_time; lifetime->sadb_lifetime_usetime = xp->curlft.use_time; pol = (struct sadb_x_policy *) skb_put(skb, sizeof(struct sadb_x_policy)); pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t); pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY; pol->sadb_x_policy_type = IPSEC_POLICY_DISCARD; if (xp->action == XFRM_POLICY_ALLOW) { if (xp->xfrm_nr) pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC; else pol->sadb_x_policy_type = IPSEC_POLICY_NONE; } pol->sadb_x_policy_dir = dir+1; pol->sadb_x_policy_reserved = 0; pol->sadb_x_policy_id = xp->index; pol->sadb_x_policy_priority = xp->priority; for (i=0; i<xp->xfrm_nr; i++) { const struct xfrm_tmpl *t = xp->xfrm_vec + i; struct sadb_x_ipsecrequest *rq; int req_size; int mode; req_size = sizeof(struct sadb_x_ipsecrequest); if (t->mode == XFRM_MODE_TUNNEL) { socklen = pfkey_sockaddr_len(t->encap_family); req_size += socklen * 2; } else { size -= 2*socklen; } rq = (void*)skb_put(skb, req_size); pol->sadb_x_policy_len += req_size/8; memset(rq, 0, sizeof(*rq)); rq->sadb_x_ipsecrequest_len = req_size; rq->sadb_x_ipsecrequest_proto = t->id.proto; if ((mode = 
pfkey_mode_from_xfrm(t->mode)) < 0) return -EINVAL; rq->sadb_x_ipsecrequest_mode = mode; rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_REQUIRE; if (t->reqid) rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_UNIQUE; if (t->optional) rq->sadb_x_ipsecrequest_level = IPSEC_LEVEL_USE; rq->sadb_x_ipsecrequest_reqid = t->reqid; if (t->mode == XFRM_MODE_TUNNEL) { u8 *sa = (void *)(rq + 1); pfkey_sockaddr_fill(&t->saddr, 0, (struct sockaddr *)sa, t->encap_family); pfkey_sockaddr_fill(&t->id.daddr, 0, (struct sockaddr *) (sa + socklen), t->encap_family); } } /* security context */ if ((xfrm_ctx = xp->security)) { int ctx_size = pfkey_xfrm_policy2sec_ctx_size(xp); sec_ctx = (struct sadb_x_sec_ctx *) skb_put(skb, ctx_size); sec_ctx->sadb_x_sec_len = ctx_size / sizeof(uint64_t); sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX; sec_ctx->sadb_x_ctx_doi = xfrm_ctx->ctx_doi; sec_ctx->sadb_x_ctx_alg = xfrm_ctx->ctx_alg; sec_ctx->sadb_x_ctx_len = xfrm_ctx->ctx_len; memcpy(sec_ctx + 1, xfrm_ctx->ctx_str, xfrm_ctx->ctx_len); } hdr->sadb_msg_len = size / sizeof(uint64_t); hdr->sadb_msg_reserved = atomic_read(&xp->refcnt); return 0; } static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) { struct sk_buff *out_skb; struct sadb_msg *out_hdr; int err; out_skb = pfkey_xfrm_policy2msg_prep(xp); if (IS_ERR(out_skb)) return PTR_ERR(out_skb); err = pfkey_xfrm_policy2msg(out_skb, xp, dir); if (err < 0) return err; out_hdr = (struct sadb_msg *) out_skb->data; out_hdr->sadb_msg_version = PF_KEY_V2; if (c->data.byid && c->event == XFRM_MSG_DELPOLICY) out_hdr->sadb_msg_type = SADB_X_SPDDELETE2; else out_hdr->sadb_msg_type = event2poltype(c->event); out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_seq = c->seq; out_hdr->sadb_msg_pid = c->portid; pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp)); return 0; } static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) { struct net *net = sock_net(sk); int 
err = 0; struct sadb_lifetime *lifetime; struct sadb_address *sa; struct sadb_x_policy *pol; struct xfrm_policy *xp; struct km_event c; struct sadb_x_sec_ctx *sec_ctx; if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1], ext_hdrs[SADB_EXT_ADDRESS_DST-1]) || !ext_hdrs[SADB_X_EXT_POLICY-1]) return -EINVAL; pol = ext_hdrs[SADB_X_EXT_POLICY-1]; if (pol->sadb_x_policy_type > IPSEC_POLICY_IPSEC) return -EINVAL; if (!pol->sadb_x_policy_dir || pol->sadb_x_policy_dir >= IPSEC_DIR_MAX) return -EINVAL; xp = xfrm_policy_alloc(net, GFP_KERNEL); if (xp == NULL) return -ENOBUFS; xp->action = (pol->sadb_x_policy_type == IPSEC_POLICY_DISCARD ? XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW); xp->priority = pol->sadb_x_policy_priority; sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1]; xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr); xp->selector.family = xp->family; xp->selector.prefixlen_s = sa->sadb_address_prefixlen; xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); xp->selector.sport = ((struct sockaddr_in *)(sa+1))->sin_port; if (xp->selector.sport) xp->selector.sport_mask = htons(0xffff); sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1]; pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr); xp->selector.prefixlen_d = sa->sadb_address_prefixlen; /* Amusing, we set this twice. KAME apps appear to set same value * in both addresses. 
*/
    /* NOTE(review): tail of the SPD add/update handler (pfkey_spdadd by
     * convention) — the function header is above this chunk; confirm. */
    xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
    xp->selector.dport = ((struct sockaddr_in *)(sa+1))->sin_port;
    if (xp->selector.dport)
        xp->selector.dport_mask = htons(0xffff);

    /* Optional security-context extension: convert and attach to the
     * policy; uctx is a temporary copy and is always freed here. */
    sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
    if (sec_ctx != NULL) {
        struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);

        if (!uctx) {
            err = -ENOBUFS;
            goto out;
        }

        err = security_xfrm_policy_alloc(&xp->security, uctx);
        kfree(uctx);

        if (err)
            goto out;
    }

    /* Default to unlimited lifetimes, then override from the optional
     * HARD/SOFT lifetime extensions. */
    xp->lft.soft_byte_limit = XFRM_INF;
    xp->lft.hard_byte_limit = XFRM_INF;
    xp->lft.soft_packet_limit = XFRM_INF;
    xp->lft.hard_packet_limit = XFRM_INF;
    if ((lifetime = ext_hdrs[SADB_EXT_LIFETIME_HARD-1]) != NULL) {
        xp->lft.hard_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
        xp->lft.hard_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
        xp->lft.hard_add_expires_seconds = lifetime->sadb_lifetime_addtime;
        xp->lft.hard_use_expires_seconds = lifetime->sadb_lifetime_usetime;
    }
    if ((lifetime = ext_hdrs[SADB_EXT_LIFETIME_SOFT-1]) != NULL) {
        xp->lft.soft_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
        xp->lft.soft_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
        xp->lft.soft_add_expires_seconds = lifetime->sadb_lifetime_addtime;
        xp->lft.soft_use_expires_seconds = lifetime->sadb_lifetime_usetime;
    }

    xp->xfrm_nr = 0;
    /* IPSEC policies carry one or more embedded ipsecrequest templates. */
    if (pol->sadb_x_policy_type == IPSEC_POLICY_IPSEC &&
        (err = parse_ipsecrequests(xp, pol)) < 0)
        goto out;

    /* Insert (or, for SPDUPDATE, replace) the policy in the SPD. */
    err = xfrm_policy_insert(pol->sadb_x_policy_dir-1, xp,
                             hdr->sadb_msg_type != SADB_X_SPDUPDATE);

    xfrm_audit_policy_add(xp, err ? 0 : 1,
                          audit_get_loginuid(current),
                          audit_get_sessionid(current), 0);

    if (err)
        goto out;

    if (hdr->sadb_msg_type == SADB_X_SPDUPDATE)
        c.event = XFRM_MSG_UPDPOLICY;
    else
        c.event = XFRM_MSG_NEWPOLICY;

    c.seq = hdr->sadb_msg_seq;
    c.portid = hdr->sadb_msg_pid;

    km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);
    xfrm_pol_put(xp);
    return 0;

out:
    /* Error path: mark the never-inserted policy dead and free it. */
    xp->walk.dead = 1;
    xfrm_policy_destroy(xp);
    return err;
}

/*
 * SADB_X_SPDDELETE: build an xfrm_selector (and optional security
 * context) from the message extensions, look up the matching MAIN-type
 * policy, delete it, audit, and notify key managers.
 */
static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb,
                           const struct sadb_msg *hdr, void * const *ext_hdrs)
{
    struct net *net = sock_net(sk);
    int err;
    struct sadb_address *sa;
    struct sadb_x_policy *pol;
    struct xfrm_policy *xp;
    struct xfrm_selector sel;
    struct km_event c;
    struct sadb_x_sec_ctx *sec_ctx;
    struct xfrm_sec_ctx *pol_ctx = NULL;

    /* SRC/DST address extensions must be present and same family, and a
     * policy extension with a valid direction is mandatory. */
    if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
                                 ext_hdrs[SADB_EXT_ADDRESS_DST-1]) ||
        !ext_hdrs[SADB_X_EXT_POLICY-1])
        return -EINVAL;

    pol = ext_hdrs[SADB_X_EXT_POLICY-1];
    if (!pol->sadb_x_policy_dir || pol->sadb_x_policy_dir >= IPSEC_DIR_MAX)
        return -EINVAL;

    memset(&sel, 0, sizeof(sel));

    /* Source half of the selector (address, prefix, proto, port). */
    sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
    sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
    sel.prefixlen_s = sa->sadb_address_prefixlen;
    sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
    sel.sport = ((struct sockaddr_in *)(sa+1))->sin_port;
    if (sel.sport)
        sel.sport_mask = htons(0xffff);

    /* Destination half of the selector. */
    sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];
    pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
    sel.prefixlen_d = sa->sadb_address_prefixlen;
    sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
    sel.dport = ((struct sockaddr_in *)(sa+1))->sin_port;
    if (sel.dport)
        sel.dport_mask = htons(0xffff);

    /* Optional security context used to disambiguate the lookup. */
    sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
    if (sec_ctx != NULL) {
        struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);

        if (!uctx)
            return -ENOMEM;

        err = security_xfrm_policy_alloc(&pol_ctx, uctx);
        kfree(uctx);
        if (err)
            return err;
    }

    /* Last argument 1 = delete the policy as part of the lookup. */
    xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
                               pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
                               1, &err);
    security_xfrm_policy_free(pol_ctx);
    if (xp == NULL)
        return -ENOENT;

    xfrm_audit_policy_delete(xp, err ? 0 : 1,
                             audit_get_loginuid(current),
                             audit_get_sessionid(current), 0);

    if (err)
        goto out;

    c.seq = hdr->sadb_msg_seq;
    c.portid = hdr->sadb_msg_pid;
    c.data.byid = 0;
    c.event = XFRM_MSG_DELPOLICY;
    km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c);

out:
    xfrm_pol_put(xp);
    if (err == 0)
        xfrm_garbage_collect(net);
    return err;
}

/*
 * Reply to an SPDGET: serialize the policy back into a PF_KEY message,
 * echo the requester's version/type/seq/pid, and unicast it to @sk.
 */
static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp,
                            const struct sadb_msg *hdr, int dir)
{
    int err;
    struct sk_buff *out_skb;
    struct sadb_msg *out_hdr;

    err = 0;
    out_skb = pfkey_xfrm_policy2msg_prep(xp);
    if (IS_ERR(out_skb)) {
        err = PTR_ERR(out_skb);
        goto out;
    }
    err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
    if (err < 0)
        goto out;

    out_hdr = (struct sadb_msg *) out_skb->data;
    out_hdr->sadb_msg_version = hdr->sadb_msg_version;
    out_hdr->sadb_msg_type = hdr->sadb_msg_type;
    out_hdr->sadb_msg_satype = 0;
    out_hdr->sadb_msg_errno = 0;
    out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
    out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
    pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
    err = 0;

out:
    return err;
}

#ifdef CONFIG_NET_KEY_MIGRATE
/* Size of a src+dst sockaddr pair for @family, 8-byte aligned. */
static int pfkey_sockaddr_pair_size(sa_family_t family)
{
    return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
}

/*
 * Extract a (saddr, daddr) pair of the same family from two consecutive
 * sockaddrs. Returns 0 on success, -EINVAL on short/invalid input.
 */
static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
                               xfrm_address_t *saddr, xfrm_address_t *daddr,
                               u16 *family)
{
    int af, socklen;

    /* Bounds-check before touching the second sockaddr. */
    if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
        return -EINVAL;

    af = pfkey_sockaddr_extract(sa, saddr);
    if (!af)
        return -EINVAL;

    socklen = pfkey_sockaddr_len(af);
    if (pfkey_sockaddr_extract((struct sockaddr *) (((u8 *)sa) + socklen),
                               daddr) != af)
        return -EINVAL;

    *family = af;
    return 0;
}

/*
 * Convert one (old, new) pair of sadb_x_ipsecrequest blocks into an
 * xfrm_migrate entry. Returns the number of bytes consumed, or -EINVAL.
 */
static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
                                    struct xfrm_migrate *m)
{
    int err;
    struct sadb_x_ipsecrequest *rq2;
    int mode;

    if (len <= sizeof(struct sadb_x_ipsecrequest) ||
        len < rq1->sadb_x_ipsecrequest_len)
        return
-EINVAL;

    /* old endpoints */
    err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
                              rq1->sadb_x_ipsecrequest_len,
                              &m->old_saddr, &m->old_daddr,
                              &m->old_family);
    if (err)
        return err;

    rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
    len -= rq1->sadb_x_ipsecrequest_len;

    /* Re-validate remaining length before reading the second request. */
    if (len <= sizeof(struct sadb_x_ipsecrequest) ||
        len < rq2->sadb_x_ipsecrequest_len)
        return -EINVAL;

    /* new endpoints */
    err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
                              rq2->sadb_x_ipsecrequest_len,
                              &m->new_saddr, &m->new_daddr,
                              &m->new_family);
    if (err)
        return err;

    /* Only the endpoint addresses may differ between old and new. */
    if (rq1->sadb_x_ipsecrequest_proto != rq2->sadb_x_ipsecrequest_proto ||
        rq1->sadb_x_ipsecrequest_mode != rq2->sadb_x_ipsecrequest_mode ||
        rq1->sadb_x_ipsecrequest_reqid != rq2->sadb_x_ipsecrequest_reqid)
        return -EINVAL;

    m->proto = rq1->sadb_x_ipsecrequest_proto;
    if ((mode = pfkey_mode_to_xfrm(rq1->sadb_x_ipsecrequest_mode)) < 0)
        return -EINVAL;
    m->mode = mode;
    m->reqid = rq1->sadb_x_ipsecrequest_reqid;

    /* Bytes consumed: both request blocks. */
    return ((int)(rq1->sadb_x_ipsecrequest_len +
                  rq2->sadb_x_ipsecrequest_len));
}

/*
 * SADB_X_MIGRATE: parse selector, optional KM address pair, and the list
 * of (old, new) ipsecrequest pairs, then hand them to xfrm_migrate().
 */
static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
                         const struct sadb_msg *hdr,
                         void * const *ext_hdrs)
{
    int i, len, ret, err = -EINVAL;
    u8 dir;
    struct sadb_address *sa;
    struct sadb_x_kmaddress *kma;
    struct sadb_x_policy *pol;
    struct sadb_x_ipsecrequest *rq;
    struct xfrm_selector sel;
    struct xfrm_migrate m[XFRM_MAX_DEPTH];
    struct xfrm_kmaddress k;

    if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
                                 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
        !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
        err = -EINVAL;
        goto out;
    }

    kma = ext_hdrs[SADB_X_EXT_KMADDRESS - 1];
    pol = ext_hdrs[SADB_X_EXT_POLICY - 1];

    if (pol->sadb_x_policy_dir >= IPSEC_DIR_MAX) {
        err = -EINVAL;
        goto out;
    }

    if (kma) {
        /* convert sadb_x_kmaddress to xfrm_kmaddress */
        k.reserved = kma->sadb_x_kmaddress_reserved;
        /* Length is carried in 8-byte units; subtract the fixed header. */
        ret = parse_sockaddr_pair((struct sockaddr *)(kma + 1),
                                  8*(kma->sadb_x_kmaddress_len) - sizeof(*kma),
                                  &k.local, &k.remote, &k.family);
        if (ret < 0) {
            err = ret;
            goto out;
        }
    }

    dir = pol->sadb_x_policy_dir - 1;
    memset(&sel, 0, sizeof(sel));

    /* set source address info of selector */
    sa = ext_hdrs[SADB_EXT_ADDRESS_SRC - 1];
    sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
    sel.prefixlen_s = sa->sadb_address_prefixlen;
    sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
    sel.sport = ((struct sockaddr_in *)(sa + 1))->sin_port;
    if (sel.sport)
        sel.sport_mask = htons(0xffff);

    /* set destination address info of selector */
    /* NOTE(review): comma operator below (not a semicolon) — preserved as-is. */
    sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1],
    pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
    sel.prefixlen_d = sa->sadb_address_prefixlen;
    sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
    sel.dport = ((struct sockaddr_in *)(sa + 1))->sin_port;
    if (sel.dport)
        sel.dport_mask = htons(0xffff);

    rq = (struct sadb_x_ipsecrequest *)(pol + 1);

    /* extract ipsecrequests */
    i = 0;
    len = pol->sadb_x_policy_len * 8 - sizeof(struct sadb_x_policy);

    while (len > 0 && i < XFRM_MAX_DEPTH) {
        ret = ipsecrequests_to_migrate(rq, len, &m[i]);
        if (ret < 0) {
            err = ret;
            goto out;
        } else {
            rq = (struct sadb_x_ipsecrequest *)((u8 *)rq + ret);
            len -= ret;
            i++;
        }
    }

    /* Must have consumed at least one pair and exactly all the bytes. */
    if (!i || len > 0) {
        err = -EINVAL;
        goto out;
    }

    return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i, kma ?
                        &k : NULL);

out:
    return err;
}
#else
/* Migration support compiled out. */
static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
                         const struct sadb_msg *hdr,
                         void * const *ext_hdrs)
{
    return -ENOPROTOOPT;
}
#endif

/*
 * SADB_X_SPDGET / SADB_X_SPDDELETE2: look a policy up by id; for
 * SPDDELETE2 the lookup also deletes it (audited + notified), for SPDGET
 * a serialized copy is unicast back to the caller.
 */
static int pfkey_spdget(struct sock *sk, struct sk_buff *skb,
                        const struct sadb_msg *hdr, void * const *ext_hdrs)
{
    struct net *net = sock_net(sk);
    unsigned int dir;
    int err = 0, delete;
    struct sadb_x_policy *pol;
    struct xfrm_policy *xp;
    struct km_event c;

    if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL)
        return -EINVAL;

    dir = xfrm_policy_id2dir(pol->sadb_x_policy_id);
    if (dir >= XFRM_POLICY_MAX)
        return -EINVAL;

    delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
    xp = xfrm_policy_byid(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
                          dir, pol->sadb_x_policy_id, delete, &err);
    if (xp == NULL)
        return -ENOENT;

    if (delete) {
        xfrm_audit_policy_delete(xp, err ? 0 : 1,
                                 audit_get_loginuid(current),
                                 audit_get_sessionid(current), 0);

        if (err)
            goto out;
        c.seq = hdr->sadb_msg_seq;
        c.portid = hdr->sadb_msg_pid;
        c.data.byid = 1;
        c.event = XFRM_MSG_DELPOLICY;
        km_policy_notify(xp, dir, &c);
    } else {
        err = key_pol_get_resp(sk, xp, hdr, dir);
    }

out:
    xfrm_pol_put(xp);
    if (delete && err == 0)
        xfrm_garbage_collect(net);
    return err;
}

/*
 * Per-policy callback for SPD dumps: serialize one policy and queue it;
 * the previously queued skb is flushed to the dumping socket first.
 */
static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
    struct pfkey_sock *pfk = ptr;
    struct sk_buff *out_skb;
    struct sadb_msg *out_hdr;
    int err;

    /* Back off if the receiver's buffer is too full to make progress. */
    if (!pfkey_can_dump(&pfk->sk))
        return -ENOBUFS;

    out_skb = pfkey_xfrm_policy2msg_prep(xp);
    if (IS_ERR(out_skb))
        return PTR_ERR(out_skb);

    err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
    if (err < 0)
        return err;

    out_hdr = (struct sadb_msg *) out_skb->data;
    out_hdr->sadb_msg_version = pfk->dump.msg_version;
    out_hdr->sadb_msg_type = SADB_X_SPDDUMP;
    out_hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
    out_hdr->sadb_msg_errno = 0;
    out_hdr->sadb_msg_seq = count + 1;
    out_hdr->sadb_msg_pid = pfk->dump.msg_portid;

    if (pfk->dump.skb)
        pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
                        &pfk->sk, sock_net(&pfk->sk));
    pfk->dump.skb =
out_skb;

    return 0;
}

/* Kick (or resume) an SPD dump walk for this socket. */
static int pfkey_dump_sp(struct pfkey_sock *pfk)
{
    struct net *net = sock_net(&pfk->sk);
    return xfrm_policy_walk(net, &pfk->dump.u.policy, dump_sp, (void *) pfk);
}

/* Tear down the SPD dump walk state. */
static void pfkey_dump_sp_done(struct pfkey_sock *pfk)
{
    xfrm_policy_walk_done(&pfk->dump.u.policy);
}

/*
 * SADB_X_SPDDUMP: start a policy dump on this socket. Only one dump may
 * be in flight per socket (-EBUSY otherwise).
 */
static int pfkey_spddump(struct sock *sk, struct sk_buff *skb,
                         const struct sadb_msg *hdr, void * const *ext_hdrs)
{
    struct pfkey_sock *pfk = pfkey_sk(sk);

    if (pfk->dump.dump != NULL)
        return -EBUSY;

    pfk->dump.msg_version = hdr->sadb_msg_version;
    pfk->dump.msg_portid = hdr->sadb_msg_pid;
    pfk->dump.dump = pfkey_dump_sp;
    pfk->dump.done = pfkey_dump_sp_done;
    xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);

    return pfkey_do_dump(pfk);
}

/*
 * Broadcast an SADB_X_SPDFLUSH notification to all PF_KEY sockets in the
 * event's namespace. All header fields are explicitly initialized.
 */
static int key_notify_policy_flush(const struct km_event *c)
{
    struct sk_buff *skb_out;
    struct sadb_msg *hdr;

    skb_out = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
    if (!skb_out)
        return -ENOBUFS;
    hdr = (struct sadb_msg *) skb_put(skb_out, sizeof(struct sadb_msg));
    hdr->sadb_msg_type = SADB_X_SPDFLUSH;
    hdr->sadb_msg_seq = c->seq;
    hdr->sadb_msg_pid = c->portid;
    hdr->sadb_msg_version = PF_KEY_V2;
    hdr->sadb_msg_errno = (uint8_t) 0;
    hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
    hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
    hdr->sadb_msg_reserved = 0;
    pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
    return 0;
}

/*
 * SADB_X_SPDFLUSH: flush all MAIN-type policies (audited), send a
 * unicast response to the requester, then notify key managers.
 */
static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb,
                          const struct sadb_msg *hdr, void * const *ext_hdrs)
{
    struct net *net = sock_net(sk);
    struct km_event c;
    struct xfrm_audit audit_info;
    int err, err2;

    audit_info.loginuid = audit_get_loginuid(current);
    audit_info.sessionid = audit_get_sessionid(current);
    audit_info.secid = 0;
    err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
    err2 = unicast_flush_resp(sk, hdr);
    if (err || err2) {
        if (err == -ESRCH) /* empty table - old silent behavior */
            return 0;
        return err;
    }

    c.data.type = XFRM_POLICY_TYPE_MAIN;
    c.event = XFRM_MSG_FLUSHPOLICY;
    c.portid = hdr->sadb_msg_pid;
    c.seq = hdr->sadb_msg_seq;
    c.net = net;
    km_policy_notify(NULL, 0, &c);

    return 0;
}

typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
                             const struct sadb_msg *hdr,
                             void * const *ext_hdrs);

/* Dispatch table indexed by sadb_msg_type; NULL = unsupported (-EOPNOTSUPP). */
static const pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
    [SADB_RESERVED]     = pfkey_reserved,
    [SADB_GETSPI]       = pfkey_getspi,
    [SADB_UPDATE]       = pfkey_add,
    [SADB_ADD]          = pfkey_add,
    [SADB_DELETE]       = pfkey_delete,
    [SADB_GET]          = pfkey_get,
    [SADB_ACQUIRE]      = pfkey_acquire,
    [SADB_REGISTER]     = pfkey_register,
    [SADB_EXPIRE]       = NULL,
    [SADB_FLUSH]        = pfkey_flush,
    [SADB_DUMP]         = pfkey_dump,
    [SADB_X_PROMISC]    = pfkey_promisc,
    [SADB_X_PCHANGE]    = NULL,
    [SADB_X_SPDUPDATE]  = pfkey_spdadd,
    [SADB_X_SPDADD]     = pfkey_spdadd,
    [SADB_X_SPDDELETE]  = pfkey_spddelete,
    [SADB_X_SPDGET]     = pfkey_spdget,
    [SADB_X_SPDACQUIRE] = NULL,
    [SADB_X_SPDDUMP]    = pfkey_spddump,
    [SADB_X_SPDFLUSH]   = pfkey_spdflush,
    [SADB_X_SPDSETIDX]  = pfkey_spdadd,
    [SADB_X_SPDDELETE2] = pfkey_spdget,
    [SADB_X_MIGRATE]    = pfkey_migrate,
};

/*
 * Process one validated PF_KEY message: mirror it to promiscuous
 * listeners, parse the extension headers, and dispatch by message type.
 */
static int pfkey_process(struct sock *sk, struct sk_buff *skb,
                         const struct sadb_msg *hdr)
{
    void *ext_hdrs[SADB_EXT_MAX];
    int err;

    pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
                    BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));

    memset(ext_hdrs, 0, sizeof(ext_hdrs));
    err = parse_exthdrs(skb, hdr, ext_hdrs);
    if (!err) {
        err = -EOPNOTSUPP;
        if (pfkey_funcs[hdr->sadb_msg_type])
            err = pfkey_funcs[hdr->sadb_msg_type](sk, skb, hdr, ext_hdrs);
    }
    return err;
}

/*
 * Validate the fixed sadb_msg header at the front of @skb: version,
 * reserved field, type range, and that sadb_msg_len (in 64-bit words)
 * matches the actual skb length. Returns NULL and sets *errp on failure.
 */
static struct sadb_msg *pfkey_get_base_msg(struct sk_buff *skb, int *errp)
{
    struct sadb_msg *hdr = NULL;

    if (skb->len < sizeof(*hdr)) {
        *errp = -EMSGSIZE;
    } else {
        hdr = (struct sadb_msg *) skb->data;
        if (hdr->sadb_msg_version != PF_KEY_V2 ||
            hdr->sadb_msg_reserved != 0 ||
            (hdr->sadb_msg_type <= SADB_RESERVED ||
             hdr->sadb_msg_type > SADB_MAX)) {
            hdr = NULL;
            *errp = -EINVAL;
        } else if (hdr->sadb_msg_len != (skb->len / sizeof(uint64_t)) ||
                   hdr->sadb_msg_len < (sizeof(struct sadb_msg) /
                                        sizeof(uint64_t))) {
            hdr
= NULL;
            *errp = -EMSGSIZE;
        } else {
            *errp = 0;
        }
    }
    return hdr;
}

/* Test whether auth algorithm @d is enabled in template @t's aalgos bitmask. */
static inline int aalg_tmpl_set(const struct xfrm_tmpl *t,
                                const struct xfrm_algo_desc *d)
{
    unsigned int id = d->desc.sadb_alg_id;

    /* Ids beyond the bitmask width can never be set. */
    if (id >= sizeof(t->aalgos) * 8)
        return 0;

    return (t->aalgos >> id) & 1;
}

/* Test whether encryption algorithm @d is enabled in template @t's ealgos. */
static inline int ealg_tmpl_set(const struct xfrm_tmpl *t,
                                const struct xfrm_algo_desc *d)
{
    unsigned int id = d->desc.sadb_alg_id;

    if (id >= sizeof(t->ealgos) * 8)
        return 0;

    return (t->ealgos >> id) & 1;
}

/* Bytes needed for the AH proposal extension (prop header + one comb per
 * available, template-enabled auth algorithm). */
static int count_ah_combs(const struct xfrm_tmpl *t)
{
    int i, sz = 0;

    for (i = 0; ; i++) {
        const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
        if (!aalg)
            break;
        if (!aalg->pfkey_supported)
            continue;
        if (aalg_tmpl_set(t, aalg) && aalg->available)
            sz += sizeof(struct sadb_comb);
    }
    return sz + sizeof(struct sadb_prop);
}

/* Bytes needed for the ESP proposal extension (one comb per enabled
 * encryption x auth pair; auth index starts at 1, skipping "none"). */
static int count_esp_combs(const struct xfrm_tmpl *t)
{
    int i, k, sz = 0;

    for (i = 0; ; i++) {
        const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
        if (!ealg)
            break;
        if (!ealg->pfkey_supported)
            continue;
        if (!(ealg_tmpl_set(t, ealg) && ealg->available))
            continue;
        for (k = 1; ; k++) {
            const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
            if (!aalg)
                break;
            if (!aalg->pfkey_supported)
                continue;
            if (aalg_tmpl_set(t, aalg) && aalg->available)
                sz += sizeof(struct sadb_comb);
        }
    }
    return sz + sizeof(struct sadb_prop);
}

/*
 * Append an SADB_EXT_PROPOSAL for AH to @skb. The caller must have sized
 * the skb with count_ah_combs() so the skb_put() calls cannot overrun.
 */
static void dump_ah_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
    struct sadb_prop *p;
    int i;

    p = (struct sadb_prop*)skb_put(skb, sizeof(struct sadb_prop));
    p->sadb_prop_len = sizeof(struct sadb_prop)/8;
    p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
    p->sadb_prop_replay = 32;
    memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));

    for (i = 0; ; i++) {
        const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(i);
        if (!aalg)
            break;
        if (!aalg->pfkey_supported)
            continue;
        if (aalg_tmpl_set(t, aalg) && aalg->available) {
            struct sadb_comb *c;
            c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
            memset(c, 0, sizeof(*c));
            p->sadb_prop_len += sizeof(struct sadb_comb)/8;
            c->sadb_comb_auth = aalg->desc.sadb_alg_id;
            c->sadb_comb_auth_minbits = aalg->desc.sadb_alg_minbits;
            c->sadb_comb_auth_maxbits = aalg->desc.sadb_alg_maxbits;
            /* Fixed lifetime proposals (seconds). */
            c->sadb_comb_hard_addtime = 24*60*60;
            c->sadb_comb_soft_addtime = 20*60*60;
            c->sadb_comb_hard_usetime = 8*60*60;
            c->sadb_comb_soft_usetime = 7*60*60;
        }
    }
}

/*
 * Append an SADB_EXT_PROPOSAL for ESP to @skb — one comb per enabled
 * encryption x auth pair. Must match count_esp_combs() sizing.
 */
static void dump_esp_combs(struct sk_buff *skb, const struct xfrm_tmpl *t)
{
    struct sadb_prop *p;
    int i, k;

    p = (struct sadb_prop*)skb_put(skb, sizeof(struct sadb_prop));
    p->sadb_prop_len = sizeof(struct sadb_prop)/8;
    p->sadb_prop_exttype = SADB_EXT_PROPOSAL;
    p->sadb_prop_replay = 32;
    memset(p->sadb_prop_reserved, 0, sizeof(p->sadb_prop_reserved));

    for (i=0; ; i++) {
        const struct xfrm_algo_desc *ealg = xfrm_ealg_get_byidx(i);
        if (!ealg)
            break;
        if (!ealg->pfkey_supported)
            continue;
        if (!(ealg_tmpl_set(t, ealg) && ealg->available))
            continue;
        for (k = 1; ; k++) {
            struct sadb_comb *c;
            const struct xfrm_algo_desc *aalg = xfrm_aalg_get_byidx(k);
            if (!aalg)
                break;
            if (!aalg->pfkey_supported)
                continue;
            if (!(aalg_tmpl_set(t, aalg) && aalg->available))
                continue;
            c = (struct sadb_comb*)skb_put(skb, sizeof(struct sadb_comb));
            memset(c, 0, sizeof(*c));
            p->sadb_prop_len += sizeof(struct sadb_comb)/8;
            c->sadb_comb_auth = aalg->desc.sadb_alg_id;
            c->sadb_comb_auth_minbits = aalg->desc.sadb_alg_minbits;
            c->sadb_comb_auth_maxbits = aalg->desc.sadb_alg_maxbits;
            c->sadb_comb_encrypt = ealg->desc.sadb_alg_id;
            c->sadb_comb_encrypt_minbits = ealg->desc.sadb_alg_minbits;
            c->sadb_comb_encrypt_maxbits = ealg->desc.sadb_alg_maxbits;
            c->sadb_comb_hard_addtime = 24*60*60;
            c->sadb_comb_soft_addtime = 20*60*60;
            c->sadb_comb_hard_usetime = 8*60*60;
            c->sadb_comb_soft_usetime = 7*60*60;
        }
    }
}

/* Policy-expire notifications are intentionally not sent over PF_KEY. */
static int key_notify_policy_expire(struct xfrm_policy *xp,
                                    const struct km_event *c)
{
    return 0;
}

/*
 * Broadcast an SADB_EXPIRE for state @x to registered sockets; hsc
 * selects soft (1) vs hard (2) lifetime extension in the message.
 */
static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
{
    struct sk_buff *out_skb;
    struct sadb_msg *out_hdr;
    int hard;
    int hsc;

    hard = c->data.hard;
if (hard)
        hsc = 2;
    else
        hsc = 1;

    out_skb = pfkey_xfrm_state2msg_expire(x, hsc);
    if (IS_ERR(out_skb))
        return PTR_ERR(out_skb);

    out_hdr = (struct sadb_msg *) out_skb->data;
    out_hdr->sadb_msg_version = PF_KEY_V2;
    out_hdr->sadb_msg_type = SADB_EXPIRE;
    out_hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
    out_hdr->sadb_msg_errno = 0;
    out_hdr->sadb_msg_reserved = 0;
    out_hdr->sadb_msg_seq = 0;
    out_hdr->sadb_msg_pid = 0;

    pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
                    xs_net(x));
    return 0;
}

/*
 * km_notify callback: route xfrm SA events to the matching PF_KEY
 * notification. Skipped entirely when no PF_KEY socket exists in the
 * namespace.
 */
static int pfkey_send_notify(struct xfrm_state *x, const struct km_event *c)
{
    struct net *net = x ? xs_net(x) : c->net;
    struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);

    if (atomic_read(&net_pfkey->socks_nr) == 0)
        return 0;

    switch (c->event) {
    case XFRM_MSG_EXPIRE:
        return key_notify_sa_expire(x, c);
    case XFRM_MSG_DELSA:
    case XFRM_MSG_NEWSA:
    case XFRM_MSG_UPDSA:
        return key_notify_sa(x, c);
    case XFRM_MSG_FLUSHSA:
        return key_notify_sa_flush(c);
    case XFRM_MSG_NEWAE: /* not yet supported */
        break;
    default:
        pr_err("pfkey: Unknown SA event %d\n", c->event);
        break;
    }

    return 0;
}

/*
 * km_notify callback for policy events; only MAIN-type policies are
 * reported over PF_KEY.
 */
static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir,
                                    const struct km_event *c)
{
    if (xp && xp->type != XFRM_POLICY_TYPE_MAIN)
        return 0;

    switch (c->event) {
    case XFRM_MSG_POLEXPIRE:
        return key_notify_policy_expire(xp, c);
    case XFRM_MSG_DELPOLICY:
    case XFRM_MSG_NEWPOLICY:
    case XFRM_MSG_UPDPOLICY:
        return key_notify_policy(xp, dir, c);
    case XFRM_MSG_FLUSHPOLICY:
        if (c->data.type != XFRM_POLICY_TYPE_MAIN)
            break;
        return key_notify_policy_flush(c);
    default:
        pr_err("pfkey: Unknown policy event %d\n", c->event);
        break;
    }

    return 0;
}

/* Next non-zero acquire sequence number (global, wraps past zero). */
static u32 get_acqseq(void)
{
    u32 res;
    static atomic_t acqseq;

    do {
        res = atomic_inc_return(&acqseq);
    } while (!res);
    return res;
}

/*
 * Broadcast an SADB_ACQUIRE to registered key managers asking them to
 * negotiate an SA matching state @x / template @t / policy @xp. The skb
 * is sized up front from the same pieces that are later skb_put() in.
 */
static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t,
                              struct xfrm_policy *xp)
{
    struct sk_buff *skb;
    struct sadb_msg *hdr;
    struct sadb_address *addr;
    struct sadb_x_policy *pol;
    int sockaddr_size;
    int size;
    struct sadb_x_sec_ctx *sec_ctx;
    struct xfrm_sec_ctx *xfrm_ctx;
    int ctx_size = 0;

    sockaddr_size = pfkey_sockaddr_size(x->props.family);
    if (!sockaddr_size)
        return -EINVAL;

    size = sizeof(struct sadb_msg) +
           (sizeof(struct sadb_address) * 2) +
           (sockaddr_size * 2) +
           sizeof(struct sadb_x_policy);

    if (x->id.proto == IPPROTO_AH)
        size += count_ah_combs(t);
    else if (x->id.proto == IPPROTO_ESP)
        size += count_esp_combs(t);

    if ((xfrm_ctx = x->security)) {
        ctx_size = PFKEY_ALIGN8(xfrm_ctx->ctx_len);
        size += sizeof(struct sadb_x_sec_ctx) + ctx_size;
    }

    skb = alloc_skb(size + 16, GFP_ATOMIC);
    if (skb == NULL)
        return -ENOMEM;

    hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
    hdr->sadb_msg_version = PF_KEY_V2;
    hdr->sadb_msg_type = SADB_ACQUIRE;
    hdr->sadb_msg_satype = pfkey_proto2satype(x->id.proto);
    hdr->sadb_msg_len = size / sizeof(uint64_t);
    hdr->sadb_msg_errno = 0;
    hdr->sadb_msg_reserved = 0;
    /* Remember the sequence so the KM's reply can be correlated. */
    hdr->sadb_msg_seq = x->km.seq = get_acqseq();
    hdr->sadb_msg_pid = 0;

    /* src address */
    addr = (struct sadb_address*) skb_put(skb,
                                          sizeof(struct sadb_address)+sockaddr_size);
    addr->sadb_address_len = (sizeof(struct sadb_address)+sockaddr_size)/
                             sizeof(uint64_t);
    addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
    addr->sadb_address_proto = 0;
    addr->sadb_address_reserved = 0;
    addr->sadb_address_prefixlen =
        pfkey_sockaddr_fill(&x->props.saddr, 0,
                            (struct sockaddr *) (addr + 1),
                            x->props.family);
    if (!addr->sadb_address_prefixlen)
        BUG();

    /* dst address */
    addr = (struct sadb_address*) skb_put(skb,
                                          sizeof(struct sadb_address)+sockaddr_size);
    addr->sadb_address_len = (sizeof(struct sadb_address)+sockaddr_size)/
                             sizeof(uint64_t);
    addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
    addr->sadb_address_proto = 0;
    addr->sadb_address_reserved = 0;
    addr->sadb_address_prefixlen =
        pfkey_sockaddr_fill(&x->id.daddr, 0,
                            (struct sockaddr *) (addr + 1),
                            x->props.family);
    if (!addr->sadb_address_prefixlen)
        BUG();

    pol = (struct sadb_x_policy *)  skb_put(skb, sizeof(struct sadb_x_policy));
    pol->sadb_x_policy_len = sizeof(struct sadb_x_policy)/sizeof(uint64_t);
    pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
    pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
    pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
    pol->sadb_x_policy_reserved = 0;
    pol->sadb_x_policy_id = xp->index;
    pol->sadb_x_policy_priority = xp->priority;

    /* Set sadb_comb's. */
    if (x->id.proto == IPPROTO_AH)
        dump_ah_combs(skb, t);
    else if (x->id.proto == IPPROTO_ESP)
        dump_esp_combs(skb, t);

    /* security context */
    if (xfrm_ctx) {
        sec_ctx = (struct sadb_x_sec_ctx *) skb_put(skb,
                        sizeof(struct sadb_x_sec_ctx) + ctx_size);
        sec_ctx->sadb_x_sec_len =
            (sizeof(struct sadb_x_sec_ctx) + ctx_size) / sizeof(uint64_t);
        sec_ctx->sadb_x_sec_exttype = SADB_X_EXT_SEC_CTX;
        sec_ctx->sadb_x_ctx_doi = xfrm_ctx->ctx_doi;
        sec_ctx->sadb_x_ctx_alg = xfrm_ctx->ctx_alg;
        sec_ctx->sadb_x_ctx_len = xfrm_ctx->ctx_len;
        memcpy(sec_ctx + 1, xfrm_ctx->ctx_str,
               xfrm_ctx->ctx_len);
    }

    return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
                           xs_net(x));
}

/*
 * km compile_policy callback: build an xfrm_policy from a raw
 * sadb_x_policy blob supplied through IP_IPSEC_POLICY /
 * IPV6_IPSEC_POLICY socket options. On failure returns NULL with the
 * error code in *dir; on success *dir holds the policy direction.
 */
static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
                                                u8 *data, int len, int *dir)
{
    struct net *net = sock_net(sk);
    struct xfrm_policy *xp;
    struct sadb_x_policy *pol = (struct sadb_x_policy*)data;
    struct sadb_x_sec_ctx *sec_ctx;

    switch (sk->sk_family) {
    case AF_INET:
        if (opt != IP_IPSEC_POLICY) {
            *dir = -EOPNOTSUPP;
            return NULL;
        }
        break;
#if IS_ENABLED(CONFIG_IPV6)
    case AF_INET6:
        if (opt != IPV6_IPSEC_POLICY) {
            *dir = -EOPNOTSUPP;
            return NULL;
        }
        break;
#endif
    default:
        *dir = -EINVAL;
        return NULL;
    }

    *dir = -EINVAL;

    /* Sanity-check the blob before trusting any of its fields. */
    if (len < sizeof(struct sadb_x_policy) ||
        pol->sadb_x_policy_len*8 > len ||
        pol->sadb_x_policy_type > IPSEC_POLICY_BYPASS ||
        (!pol->sadb_x_policy_dir || pol->sadb_x_policy_dir > IPSEC_DIR_OUTBOUND))
        return NULL;

    xp = xfrm_policy_alloc(net, GFP_ATOMIC);
    if (xp == NULL) {
        *dir = -ENOBUFS;
        return NULL;
    }

    xp->action = (pol->sadb_x_policy_type == IPSEC_POLICY_DISCARD ?
XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW);
    xp->lft.soft_byte_limit = XFRM_INF;
    xp->lft.hard_byte_limit = XFRM_INF;
    xp->lft.soft_packet_limit = XFRM_INF;
    xp->lft.hard_packet_limit = XFRM_INF;
    xp->family = sk->sk_family;

    xp->xfrm_nr = 0;
    if (pol->sadb_x_policy_type == IPSEC_POLICY_IPSEC &&
        (*dir = parse_ipsecrequests(xp, pol)) < 0)
        goto out;

    /* security context too */
    if (len >= (pol->sadb_x_policy_len*8 +
        sizeof(struct sadb_x_sec_ctx))) {
        char *p = (char *)pol;
        struct xfrm_user_sec_ctx *uctx;

        p += pol->sadb_x_policy_len*8;
        sec_ctx = (struct sadb_x_sec_ctx *)p;
        /* NOTE(review): sadb_x_sec_len is compared against a byte count
         * here without scaling; verify_sec_ctx_len() below re-validates
         * the extension — confirm the intended unit. */
        if (len < pol->sadb_x_policy_len*8 +
            sec_ctx->sadb_x_sec_len) {
            *dir = -EINVAL;
            goto out;
        }
        if ((*dir = verify_sec_ctx_len(p)))
            goto out;
        uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
        *dir = security_xfrm_policy_alloc(&xp->security, uctx);
        kfree(uctx);
        if (*dir)
            goto out;
    }

    *dir = pol->sadb_x_policy_dir-1;
    return xp;

out:
    xp->walk.dead = 1;
    xfrm_policy_destroy(xp);
    return NULL;
}

/*
 * km new_mapping callback: announce a NAT-T address/port change for SA
 * @x (ESP only) by broadcasting SADB_X_NAT_T_NEW_MAPPING to registered
 * sockets.
 */
static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
                                  __be16 sport)
{
    struct sk_buff *skb;
    struct sadb_msg *hdr;
    struct sadb_sa *sa;
    struct sadb_address *addr;
    struct sadb_x_nat_t_port *n_port;
    int sockaddr_size;
    int size;
    __u8 satype = (x->id.proto == IPPROTO_ESP ? SADB_SATYPE_ESP : 0);
    struct xfrm_encap_tmpl *natt = NULL;

    sockaddr_size = pfkey_sockaddr_size(x->props.family);
    if (!sockaddr_size)
        return -EINVAL;

    if (!satype)
        return -EINVAL;

    if (!x->encap)
        return -EINVAL;

    natt = x->encap;

    /* Build an SADB_X_NAT_T_NEW_MAPPING message:
     *
     * HDR | SA | ADDRESS_SRC (old addr) | NAT_T_SPORT (old port) |
     * ADDRESS_DST (new addr) | NAT_T_DPORT (new port)
     */

    size = sizeof(struct sadb_msg) +
           sizeof(struct sadb_sa) +
           (sizeof(struct sadb_address) * 2) +
           (sockaddr_size * 2) +
           (sizeof(struct sadb_x_nat_t_port) * 2);

    skb = alloc_skb(size + 16, GFP_ATOMIC);
    if (skb == NULL)
        return -ENOMEM;

    hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
    hdr->sadb_msg_version = PF_KEY_V2;
    hdr->sadb_msg_type = SADB_X_NAT_T_NEW_MAPPING;
    hdr->sadb_msg_satype = satype;
    hdr->sadb_msg_len = size / sizeof(uint64_t);
    hdr->sadb_msg_errno = 0;
    hdr->sadb_msg_reserved = 0;
    hdr->sadb_msg_seq = x->km.seq = get_acqseq();
    hdr->sadb_msg_pid = 0;

    /* SA */
    sa = (struct sadb_sa *) skb_put(skb, sizeof(struct sadb_sa));
    sa->sadb_sa_len = sizeof(struct sadb_sa)/sizeof(uint64_t);
    sa->sadb_sa_exttype = SADB_EXT_SA;
    sa->sadb_sa_spi = x->id.spi;
    sa->sadb_sa_replay = 0;
    sa->sadb_sa_state = 0;
    sa->sadb_sa_auth = 0;
    sa->sadb_sa_encrypt = 0;
    sa->sadb_sa_flags = 0;

    /* ADDRESS_SRC (old addr) */
    addr = (struct sadb_address*)
        skb_put(skb, sizeof(struct sadb_address)+sockaddr_size);
    addr->sadb_address_len = (sizeof(struct sadb_address)+sockaddr_size)/
                             sizeof(uint64_t);
    addr->sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
    addr->sadb_address_proto = 0;
    addr->sadb_address_reserved = 0;
    addr->sadb_address_prefixlen =
        pfkey_sockaddr_fill(&x->props.saddr, 0,
                            (struct sockaddr *) (addr + 1),
                            x->props.family);
    if (!addr->sadb_address_prefixlen)
        BUG();

    /* NAT_T_SPORT (old port) */
    n_port = (struct sadb_x_nat_t_port*) skb_put(skb, sizeof (*n_port));
    n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
    n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_SPORT;
    n_port->sadb_x_nat_t_port_port = natt->encap_sport;
    n_port->sadb_x_nat_t_port_reserved = 0;

    /* ADDRESS_DST (new addr) */
    addr = (struct sadb_address*)
        skb_put(skb, sizeof(struct sadb_address)+sockaddr_size);
    addr->sadb_address_len = (sizeof(struct sadb_address)+sockaddr_size)/
                             sizeof(uint64_t);
    addr->sadb_address_exttype = SADB_EXT_ADDRESS_DST;
    addr->sadb_address_proto = 0;
    addr->sadb_address_reserved = 0;
    addr->sadb_address_prefixlen =
        pfkey_sockaddr_fill(ipaddr, 0,
                            (struct sockaddr *) (addr + 1),
                            x->props.family);
    if (!addr->sadb_address_prefixlen)
        BUG();

    /* NAT_T_DPORT (new port) */
    n_port = (struct sadb_x_nat_t_port*) skb_put(skb, sizeof (*n_port));
    n_port->sadb_x_nat_t_port_len = sizeof(*n_port)/sizeof(uint64_t);
    n_port->sadb_x_nat_t_port_exttype = SADB_X_EXT_NAT_T_DPORT;
    n_port->sadb_x_nat_t_port_port = sport;
    n_port->sadb_x_nat_t_port_reserved = 0;

    return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
                           xs_net(x));
}

#ifdef CONFIG_NET_KEY_MIGRATE
/*
 * Append one SADB address extension (SRC or DST per @type) built from
 * the selector @sel. The caller must have reserved @sasize bytes.
 */
static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
                            const struct xfrm_selector *sel)
{
    struct sadb_address *addr;

    addr = (struct sadb_address *)skb_put(skb,
                                          sizeof(struct sadb_address) + sasize);
    addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8;
    addr->sadb_address_exttype = type;
    addr->sadb_address_proto = sel->proto;
    addr->sadb_address_reserved = 0;

    switch (type) {
    case SADB_EXT_ADDRESS_SRC:
        addr->sadb_address_prefixlen = sel->prefixlen_s;
        pfkey_sockaddr_fill(&sel->saddr, 0,
                            (struct sockaddr *)(addr + 1),
                            sel->family);
        break;
    case SADB_EXT_ADDRESS_DST:
        addr->sadb_address_prefixlen = sel->prefixlen_d;
        pfkey_sockaddr_fill(&sel->daddr, 0,
                            (struct sockaddr *)(addr + 1),
                            sel->family);
        break;
    default:
        return -EINVAL;
    }

    return 0;
}

/*
 * Append an SADB_X_KMADDRESS extension carrying the KM's local/remote
 * address pair from @k.
 */
static int set_sadb_kmaddress(struct sk_buff *skb,
                              const struct xfrm_kmaddress *k)
{
    struct sadb_x_kmaddress *kma;
    u8 *sa;
    int family = k->family;
    int socklen = pfkey_sockaddr_len(family);
    int size_req;

    size_req = (sizeof(struct sadb_x_kmaddress)
+ pfkey_sockaddr_pair_size(family));

    kma = (struct sadb_x_kmaddress *)skb_put(skb, size_req);
    memset(kma, 0, size_req);
    kma->sadb_x_kmaddress_len = size_req / 8;
    kma->sadb_x_kmaddress_exttype = SADB_X_EXT_KMADDRESS;
    kma->sadb_x_kmaddress_reserved = k->reserved;

    sa = (u8 *)(kma + 1);
    if (!pfkey_sockaddr_fill(&k->local, 0, (struct sockaddr *)sa, family) ||
        !pfkey_sockaddr_fill(&k->remote, 0, (struct sockaddr *)(sa+socklen),
                             family))
        return -EINVAL;

    return 0;
}

/*
 * Append one sadb_x_ipsecrequest extension (proto/mode/level/reqid plus
 * a src,dst sockaddr pair) to @skb.
 */
static int set_ipsecrequest(struct sk_buff *skb,
                            uint8_t proto, uint8_t mode, int level,
                            uint32_t reqid, uint8_t family,
                            const xfrm_address_t *src,
                            const xfrm_address_t *dst)
{
    struct sadb_x_ipsecrequest *rq;
    u8 *sa;
    int socklen = pfkey_sockaddr_len(family);
    int size_req;

    size_req = sizeof(struct sadb_x_ipsecrequest) +
               pfkey_sockaddr_pair_size(family);

    rq = (struct sadb_x_ipsecrequest *)skb_put(skb, size_req);
    memset(rq, 0, size_req);
    /* NOTE(review): sadb_x_ipsecrequest_len is stored in bytes here
     * (matching the parser above), not 64-bit units like other exts. */
    rq->sadb_x_ipsecrequest_len = size_req;
    rq->sadb_x_ipsecrequest_proto = proto;
    rq->sadb_x_ipsecrequest_mode = mode;
    rq->sadb_x_ipsecrequest_level = level;
    rq->sadb_x_ipsecrequest_reqid = reqid;

    sa = (u8 *) (rq + 1);
    if (!pfkey_sockaddr_fill(src, 0, (struct sockaddr *)sa, family) ||
        !pfkey_sockaddr_fill(dst, 0, (struct sockaddr *)(sa + socklen),
                             family))
        return -EINVAL;

    return 0;
}
#endif

#ifdef CONFIG_NET_KEY_MIGRATE
/*
 * km migrate callback: broadcast an SADB_X_MIGRATE message describing
 * the selector, optional KM address pair, and (old, new) endpoint pairs
 * for each bundle.
 */
static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
                              const struct xfrm_migrate *m, int num_bundles,
                              const struct xfrm_kmaddress *k)
{
    int i;
    int sasize_sel;
    int size = 0;
    int size_pol = 0;
    struct sk_buff *skb;
    struct sadb_msg *hdr;
    struct sadb_x_policy *pol;
    const struct xfrm_migrate *mp;

    if (type != XFRM_POLICY_TYPE_MAIN)
        return 0;

    if (num_bundles <= 0 || num_bundles > XFRM_MAX_DEPTH)
        return -EINVAL;

    if (k != NULL) {
        /* addresses for KM */
        size += PFKEY_ALIGN8(sizeof(struct sadb_x_kmaddress) +
                             pfkey_sockaddr_pair_size(k->family));
    }

    /* selector */
    sasize_sel = pfkey_sockaddr_size(sel->family);
    if (!sasize_sel)
        return -EINVAL;
    size += (sizeof(struct sadb_address) + sasize_sel) * 2;

    /* policy info */
    size_pol += sizeof(struct sadb_x_policy);

    /* ipsecrequests */
    for (i = 0, mp = m; i < num_bundles; i++, mp++) {
        /* old locator pair */
        size_pol += sizeof(struct sadb_x_ipsecrequest) +
                    pfkey_sockaddr_pair_size(mp->old_family);
        /* new locator pair */
        size_pol += sizeof(struct sadb_x_ipsecrequest) +
                    pfkey_sockaddr_pair_size(mp->new_family);
    }

    size += sizeof(struct sadb_msg) + size_pol;

    /* alloc buffer */
    skb = alloc_skb(size, GFP_ATOMIC);
    if (skb == NULL)
        return -ENOMEM;

    hdr = (struct sadb_msg *)skb_put(skb, sizeof(struct sadb_msg));
    hdr->sadb_msg_version = PF_KEY_V2;
    hdr->sadb_msg_type = SADB_X_MIGRATE;
    hdr->sadb_msg_satype = pfkey_proto2satype(m->proto);
    hdr->sadb_msg_len = size / 8;
    hdr->sadb_msg_errno = 0;
    hdr->sadb_msg_reserved = 0;
    hdr->sadb_msg_seq = 0;
    hdr->sadb_msg_pid = 0;

    /* Addresses to be used by KM for negotiation, if ext is available */
    if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
        goto err;

    /* selector src */
    set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);

    /* selector dst */
    set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_DST, sel);

    /* policy information */
    pol = (struct sadb_x_policy *)skb_put(skb, sizeof(struct sadb_x_policy));
    pol->sadb_x_policy_len = size_pol / 8;
    pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
    pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
    pol->sadb_x_policy_dir = dir + 1;
    pol->sadb_x_policy_reserved = 0;
    pol->sadb_x_policy_id = 0;
    pol->sadb_x_policy_priority = 0;

    for (i = 0, mp = m; i < num_bundles; i++, mp++) {
        /* old ipsecrequest */
        int mode = pfkey_mode_from_xfrm(mp->mode);
        if (mode < 0)
            goto err;
        if (set_ipsecrequest(skb, mp->proto, mode,
                             (mp->reqid ? IPSEC_LEVEL_UNIQUE :
                                          IPSEC_LEVEL_REQUIRE),
                             mp->reqid, mp->old_family,
                             &mp->old_saddr, &mp->old_daddr) < 0)
            goto err;

        /* new ipsecrequest */
        if (set_ipsecrequest(skb, mp->proto, mode,
                             (mp->reqid ? IPSEC_LEVEL_UNIQUE :
                                          IPSEC_LEVEL_REQUIRE),
                             mp->reqid, mp->new_family,
                             &mp->new_saddr, &mp->new_daddr) < 0)
            goto err;
    }

    /* broadcast migrate message to sockets */
    pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);

    return 0;

err:
    kfree_skb(skb);
    return -EINVAL;
}
#else
/* Migration support compiled out. */
static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
                              const struct xfrm_migrate *m, int num_bundles,
                              const struct xfrm_kmaddress *k)
{
    return -ENOPROTOOPT;
}
#endif

/*
 * sendmsg() on a PF_KEY socket: copy the user message, validate its base
 * header, and process it under xfrm_cfg_mutex. On handler failure an
 * error reply is unicast back instead of failing the syscall.
 */
static int pfkey_sendmsg(struct kiocb *kiocb,
                         struct socket *sock, struct msghdr *msg, size_t len)
{
    struct sock *sk = sock->sk;
    struct sk_buff *skb = NULL;
    struct sadb_msg *hdr = NULL;
    int err;

    err = -EOPNOTSUPP;
    if (msg->msg_flags & MSG_OOB)
        goto out;

    err = -EMSGSIZE;
    if ((unsigned int)len > sk->sk_sndbuf - 32)
        goto out;

    err = -ENOBUFS;
    skb = alloc_skb(len, GFP_KERNEL);
    if (skb == NULL)
        goto out;

    err = -EFAULT;
    if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len))
        goto out;

    hdr = pfkey_get_base_msg(skb, &err);
    if (!hdr)
        goto out;

    mutex_lock(&xfrm_cfg_mutex);
    err = pfkey_process(sk, skb, hdr);
    mutex_unlock(&xfrm_cfg_mutex);

out:
    /* pfkey_error() == 0 means the error was delivered as a message. */
    if (err && hdr && pfkey_error(hdr, err, sk) == 0)
        err = 0;
    kfree_skb(skb);

    return err ? : len;
}

/*
 * recvmsg() on a PF_KEY socket: standard datagram receive; also resumes
 * a pending dump once enough receive-buffer space has drained.
 */
static int pfkey_recvmsg(struct kiocb *kiocb,
                         struct socket *sock, struct msghdr *msg, size_t len,
                         int flags)
{
    struct sock *sk = sock->sk;
    struct pfkey_sock *pfk = pfkey_sk(sk);
    struct sk_buff *skb;
    int copied, err;

    err = -EINVAL;
    if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
        goto out;

    msg->msg_namelen = 0;
    skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
    if (skb == NULL)
        goto out;

    copied = skb->len;
    if (copied > len) {
        msg->msg_flags |= MSG_TRUNC;
        copied = len;
    }

    skb_reset_transport_header(skb);
    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
    if (err)
        goto out_free;

    sock_recv_ts_and_drops(msg, sk, skb);

    err = (flags & MSG_TRUNC) ?
skb->len : copied; if (pfk->dump.dump != NULL && 3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) pfkey_do_dump(pfk); out_free: skb_free_datagram(sk, skb); out: return err; } static const struct proto_ops pfkey_ops = { .family = PF_KEY, .owner = THIS_MODULE, /* Operations that make no sense on pfkey sockets. */ .bind = sock_no_bind, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, /* Now the operations that really occur. */ .release = pfkey_release, .poll = datagram_poll, .sendmsg = pfkey_sendmsg, .recvmsg = pfkey_recvmsg, }; static const struct net_proto_family pfkey_family_ops = { .family = PF_KEY, .create = pfkey_create, .owner = THIS_MODULE, }; #ifdef CONFIG_PROC_FS static int pfkey_seq_show(struct seq_file *f, void *v) { struct sock *s = sk_entry(v); if (v == SEQ_START_TOKEN) seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); else seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n", s, atomic_read(&s->sk_refcnt), sk_rmem_alloc_get(s), sk_wmem_alloc_get(s), from_kuid_munged(seq_user_ns(f), sock_i_uid(s)), sock_i_ino(s) ); return 0; } static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos) __acquires(rcu) { struct net *net = seq_file_net(f); struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); rcu_read_lock(); return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos); } static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos) { struct net *net = seq_file_net(f); struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); return seq_hlist_next_rcu(v, &net_pfkey->table, ppos); } static void pfkey_seq_stop(struct seq_file *f, void *v) __releases(rcu) { rcu_read_unlock(); } static const struct seq_operations pfkey_seq_ops = { .start = pfkey_seq_start, .next = 
pfkey_seq_next, .stop = pfkey_seq_stop, .show = pfkey_seq_show, }; static int pfkey_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &pfkey_seq_ops, sizeof(struct seq_net_private)); } static const struct file_operations pfkey_proc_ops = { .open = pfkey_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; static int __net_init pfkey_init_proc(struct net *net) { struct proc_dir_entry *e; e = proc_create("pfkey", 0, net->proc_net, &pfkey_proc_ops); if (e == NULL) return -ENOMEM; return 0; } static void __net_exit pfkey_exit_proc(struct net *net) { remove_proc_entry("pfkey", net->proc_net); } #else static inline int pfkey_init_proc(struct net *net) { return 0; } static inline void pfkey_exit_proc(struct net *net) { } #endif static struct xfrm_mgr pfkeyv2_mgr = { .id = "pfkeyv2", .notify = pfkey_send_notify, .acquire = pfkey_send_acquire, .compile_policy = pfkey_compile_policy, .new_mapping = pfkey_send_new_mapping, .notify_policy = pfkey_send_policy_notify, .migrate = pfkey_send_migrate, }; static int __net_init pfkey_net_init(struct net *net) { struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); int rv; INIT_HLIST_HEAD(&net_pfkey->table); atomic_set(&net_pfkey->socks_nr, 0); rv = pfkey_init_proc(net); return rv; } static void __net_exit pfkey_net_exit(struct net *net) { struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); pfkey_exit_proc(net); BUG_ON(!hlist_empty(&net_pfkey->table)); } static struct pernet_operations pfkey_net_ops = { .init = pfkey_net_init, .exit = pfkey_net_exit, .id = &pfkey_net_id, .size = sizeof(struct netns_pfkey), }; static void __exit ipsec_pfkey_exit(void) { xfrm_unregister_km(&pfkeyv2_mgr); sock_unregister(PF_KEY); unregister_pernet_subsys(&pfkey_net_ops); proto_unregister(&key_proto); } static int __init ipsec_pfkey_init(void) { int err = proto_register(&key_proto, 0); if (err != 0) goto out; err = register_pernet_subsys(&pfkey_net_ops); if (err != 0) goto 
out_unregister_key_proto; err = sock_register(&pfkey_family_ops); if (err != 0) goto out_unregister_pernet; err = xfrm_register_km(&pfkeyv2_mgr); if (err != 0) goto out_sock_unregister; out: return err; out_sock_unregister: sock_unregister(PF_KEY); out_unregister_pernet: unregister_pernet_subsys(&pfkey_net_ops); out_unregister_key_proto: proto_unregister(&key_proto); goto out; } module_init(ipsec_pfkey_init); module_exit(ipsec_pfkey_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_KEY);
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5845_18
crossvul-cpp_data_good_5110_0
/* toshiba.c * * Wiretap Library * Copyright (c) 1998 by Gilbert Ramirez <gram@alumni.rice.edu> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include "wtap-int.h" #include "toshiba.h" #include "file_wrappers.h" #include <stdlib.h> #include <string.h> /* This module reads the output of the 'snoop' command in the Toshiba * TR-600 and TR-650 "Compact" ISDN Routers. You can telnet to the * router and run 'snoop' on the different channels, and at different * detail levels. Be sure to choose the 'dump' level to get the hex dump. * The 'snoop' command has nothing to do with the Solaris 'snoop' * command, except that they both capture packets. */ /* Example 'snoop' output data: Script started on Thu Sep 9 21:48:49 1999 ]0;gram@nirvana:/tmp$ telnet 10.0.0.254 Trying 10.0.0.254... Connected to 10.0.0.254. Escape character is '^]'. TR-600(tr600) System Console Login:admin Password:******* *--------------------------------------------------------* | T O S H I B A T R - 6 0 0 | | < Compact Router > | | V1.02.02 | | | | (C) Copyright TOSHIBA Corp. 1997 All rights reserved. | *--------------------------------------------------------* tr600>snoop dump b1 Trace start?(on/off/dump/dtl)->dump IP Address?->b1 B1 Port Filetering Trace start(Dump Mode)... 
tr600>[No.1] 00:00:09.14 B1:1 Tx 207.193.26.136->151.164.1.8 DNS SPORT=1028 LEN=38 CHKSUM=4FD4 ID=2390 Query RD QCNT=1 pow.zing.org? OFFSET 0001-0203-0405-0607-0809-0A0B-0C0D-0E0F 0123456789ABCDEF LEN=67 0000 : FF03 003D C000 0008 2145 0000 3A12 6500 ...=....!E..:.e. 0010 : 003F 11E6 58CF C11A 8897 A401 0804 0400 .?..X........... 0020 : 3500 264F D409 5601 0000 0100 0000 0000 5.&O..V......... 0030 : 0003 706F 7704 7A69 6E67 036F 7267 0000 ..pow.zing.org.. 0040 : 0100 01 ... [No.2] 00:00:09.25 B1:1 Rx 151.164.1.8->207.193.26.136 DNS DPORT=1028 LEN=193 CHKSUM=3E06 ID=2390 Answer RD RA QCNT=1 pow.zing.org? ANCNT=1 pow.zing.org=206.57.36.90 TTL=2652 OFFSET 0001-0203-0405-0607-0809-0A0B-0C0D-0E0F 0123456789ABCDEF LEN=222 0000 : FF03 003D C000 0013 2145 0000 D590 9340 ...=....!E.....@ 0010 : 00F7 116F 8E97 A401 08CF C11A 8800 3504 ...o..........5. 0020 : 0400 C13E 0609 5681 8000 0100 0100 0300 ...>..V......... 0030 : 0303 706F 7704 7A69 6E67 036F 7267 0000 ..pow.zing.org.. 0040 : 0100 01C0 0C00 0100 0100 000A 5C00 04CE ............\... 0050 : 3924 5A04 5A49 4E47 036F 7267 0000 0200 9$Z.ZING.org.... 0060 : 0100 016F 5B00 0D03 4841 4E03 5449 5703 ...o[...HAN.TIW. 0070 : 4E45 5400 C02E 0002 0001 0001 6F5B 0006 NET.........o[.. 0080 : 034E 5331 C02E C02E 0002 0001 0001 6F5B .NS1..........o[ 0090 : 001C 0854 414C 4945 5349 4E0D 434F 4E46 ...TALIESIN.CONF 00A0 : 4142 554C 4154 494F 4E03 434F 4D00 C042 ABULATION.COM..B 00B0 : 0001 0001 0001 51EC 0004 CE39 2406 C05B ......Q....9$..[ 00C0 : 0001 0001 0001 6F5B 0004 CE39 245A C06D ......o[...9$Z.m 00D0 : 0001 0001 0001 4521 0004 187C 1F01 ......E!...|.. */ /* Magic text to check for toshiba-ness of file */ static const char toshiba_hdr_magic[] = { 'T', ' ', 'O', ' ', 'S', ' ', 'H', ' ', 'I', ' ', 'B', ' ', 'A' }; #define TOSHIBA_HDR_MAGIC_SIZE (sizeof toshiba_hdr_magic / sizeof toshiba_hdr_magic[0]) /* Magic text for start of packet */ static const char toshiba_rec_magic[] = { '[', 'N', 'o', '.' 
}; #define TOSHIBA_REC_MAGIC_SIZE (sizeof toshiba_rec_magic / sizeof toshiba_rec_magic[0]) static gboolean toshiba_read(wtap *wth, int *err, gchar **err_info, gint64 *data_offset); static gboolean toshiba_seek_read(wtap *wth, gint64 seek_off, struct wtap_pkthdr *phdr, Buffer *buf, int *err, gchar **err_info); static gboolean parse_single_hex_dump_line(char* rec, guint8 *buf, guint byte_offset); static gboolean parse_toshiba_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer *buf, int *err, gchar **err_info); /* Seeks to the beginning of the next packet, and returns the byte offset. Returns -1 on failure, and sets "*err" to the error and "*err_info" to null or an additional error string. */ static gint64 toshiba_seek_next_packet(wtap *wth, int *err, gchar **err_info) { int byte; guint level = 0; gint64 cur_off; while ((byte = file_getc(wth->fh)) != EOF) { if (byte == toshiba_rec_magic[level]) { level++; if (level >= TOSHIBA_REC_MAGIC_SIZE) { /* note: we're leaving file pointer right after the magic characters */ cur_off = file_tell(wth->fh); if (cur_off == -1) { /* Error. */ *err = file_error(wth->fh, err_info); return -1; } return cur_off + 1; } } else { level = 0; } } /* EOF or error. */ *err = file_error(wth->fh, err_info); return -1; } #define TOSHIBA_HEADER_LINES_TO_CHECK 200 #define TOSHIBA_LINE_LENGTH 240 /* Look through the first part of a file to see if this is * a Toshiba trace file. * * Returns TRUE if it is, FALSE if it isn't or if we get an I/O error; * if we get an I/O error, "*err" will be set to a non-zero value and * "*err_info" will be set to null or an additional error string. */ static gboolean toshiba_check_file_type(wtap *wth, int *err, gchar **err_info) { char buf[TOSHIBA_LINE_LENGTH]; guint i, reclen, level, line; char byte; buf[TOSHIBA_LINE_LENGTH-1] = 0; for (line = 0; line < TOSHIBA_HEADER_LINES_TO_CHECK; line++) { if (file_gets(buf, TOSHIBA_LINE_LENGTH, wth->fh) == NULL) { /* EOF or error. 
*/ *err = file_error(wth->fh, err_info); return FALSE; } reclen = (guint) strlen(buf); if (reclen < TOSHIBA_HDR_MAGIC_SIZE) { continue; } level = 0; for (i = 0; i < reclen; i++) { byte = buf[i]; if (byte == toshiba_hdr_magic[level]) { level++; if (level >= TOSHIBA_HDR_MAGIC_SIZE) { return TRUE; } } else { level = 0; } } } *err = 0; return FALSE; } wtap_open_return_val toshiba_open(wtap *wth, int *err, gchar **err_info) { /* Look for Toshiba header */ if (!toshiba_check_file_type(wth, err, err_info)) { if (*err != 0 && *err != WTAP_ERR_SHORT_READ) return WTAP_OPEN_ERROR; return WTAP_OPEN_NOT_MINE; } wth->file_encap = WTAP_ENCAP_PER_PACKET; wth->file_type_subtype = WTAP_FILE_TYPE_SUBTYPE_TOSHIBA; wth->snapshot_length = 0; /* not known */ wth->subtype_read = toshiba_read; wth->subtype_seek_read = toshiba_seek_read; wth->file_tsprec = WTAP_TSPREC_CSEC; return WTAP_OPEN_MINE; } /* Find the next packet and parse it; called from wtap_read(). */ static gboolean toshiba_read(wtap *wth, int *err, gchar **err_info, gint64 *data_offset) { gint64 offset; /* Find the next packet */ offset = toshiba_seek_next_packet(wth, err, err_info); if (offset < 1) return FALSE; *data_offset = offset; /* Parse the packet */ return parse_toshiba_packet(wth->fh, &wth->phdr, wth->frame_buffer, err, err_info); } /* Used to read packets in random-access fashion */ static gboolean toshiba_seek_read(wtap *wth, gint64 seek_off, struct wtap_pkthdr *phdr, Buffer *buf, int *err, gchar **err_info) { if (file_seek(wth->random_fh, seek_off - 1, SEEK_SET, err) == -1) return FALSE; if (!parse_toshiba_packet(wth->random_fh, phdr, buf, err, err_info)) { if (*err == 0) *err = WTAP_ERR_SHORT_READ; return FALSE; } return TRUE; } /* Parses a packet. 
*/ static gboolean parse_toshiba_packet(FILE_T fh, struct wtap_pkthdr *phdr, Buffer *buf, int *err, gchar **err_info) { union wtap_pseudo_header *pseudo_header = &phdr->pseudo_header; char line[TOSHIBA_LINE_LENGTH]; int num_items_scanned; guint pkt_len; int pktnum, hr, min, sec, csec; char channel[10], direction[10]; int i, hex_lines; guint8 *pd; /* Our file pointer should be on the line containing the * summary information for a packet. Read in that line and * extract the useful information */ if (file_gets(line, TOSHIBA_LINE_LENGTH, fh) == NULL) { *err = file_error(fh, err_info); if (*err == 0) { *err = WTAP_ERR_SHORT_READ; } return FALSE; } /* Find text in line after "[No.". Limit the length of the * two strings since we have fixed buffers for channel[] and * direction[] */ num_items_scanned = sscanf(line, "%9d] %2d:%2d:%2d.%9d %9s %9s", &pktnum, &hr, &min, &sec, &csec, channel, direction); if (num_items_scanned != 7) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("toshiba: record header isn't valid"); return FALSE; } /* Scan lines until we find the OFFSET line. In a "telnet" trace, * this will be the next line. But if you save your telnet session * to a file from within a Windows-based telnet client, it may * put in line breaks at 80 columns (or however big your "telnet" box * is). CRT (a Windows telnet app from VanDyke) does this. * Here we assume that 80 columns will be the minimum size, and that * the OFFSET line is not broken in the middle. It's the previous * line that is normally long and can thus be broken at column 80. 
*/ do { if (file_gets(line, TOSHIBA_LINE_LENGTH, fh) == NULL) { *err = file_error(fh, err_info); if (*err == 0) { *err = WTAP_ERR_SHORT_READ; } return FALSE; } /* Check for "OFFSET 0001-0203" at beginning of line */ line[16] = '\0'; } while (strcmp(line, "OFFSET 0001-0203") != 0); num_items_scanned = sscanf(line+64, "LEN=%9u", &pkt_len); if (num_items_scanned != 1) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("toshiba: OFFSET line doesn't have valid LEN item"); return FALSE; } if (pkt_len > WTAP_MAX_PACKET_SIZE) { /* * Probably a corrupt capture file; don't blow up trying * to allocate space for an immensely-large packet. */ *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup_printf("toshiba: File has %u-byte packet, bigger than maximum of %u", pkt_len, WTAP_MAX_PACKET_SIZE); return FALSE; } phdr->rec_type = REC_TYPE_PACKET; phdr->presence_flags = WTAP_HAS_TS|WTAP_HAS_CAP_LEN; phdr->ts.secs = hr * 3600 + min * 60 + sec; phdr->ts.nsecs = csec * 10000000; phdr->caplen = pkt_len; phdr->len = pkt_len; switch (channel[0]) { case 'B': phdr->pkt_encap = WTAP_ENCAP_ISDN; pseudo_header->isdn.uton = (direction[0] == 'T'); pseudo_header->isdn.channel = (guint8) strtol(&channel[1], NULL, 10); break; case 'D': phdr->pkt_encap = WTAP_ENCAP_ISDN; pseudo_header->isdn.uton = (direction[0] == 'T'); pseudo_header->isdn.channel = 0; break; default: phdr->pkt_encap = WTAP_ENCAP_ETHERNET; /* XXX - is there an FCS in the frame? */ pseudo_header->eth.fcs_len = -1; break; } /* Make sure we have enough room for the packet */ ws_buffer_assure_space(buf, pkt_len); pd = ws_buffer_start_ptr(buf); /* Calculate the number of hex dump lines, each * containing 16 bytes of data */ hex_lines = pkt_len / 16 + ((pkt_len % 16) ? 
1 : 0); for (i = 0; i < hex_lines; i++) { if (file_gets(line, TOSHIBA_LINE_LENGTH, fh) == NULL) { *err = file_error(fh, err_info); if (*err == 0) { *err = WTAP_ERR_SHORT_READ; } return FALSE; } if (!parse_single_hex_dump_line(line, pd, i * 16)) { *err = WTAP_ERR_BAD_FILE; *err_info = g_strdup("toshiba: hex dump not valid"); return FALSE; } } return TRUE; } /* 1 2 3 4 0123456789012345678901234567890123456789012345 0000 : FF03 003D C000 0008 2145 0000 3A12 6500 ...=....!E..:.e. 0010 : 003F 11E6 58CF C11A 8897 A401 0804 0400 .?..X........... 0020 : 0100 01 ... */ #define START_POS 7 #define HEX_LENGTH ((8 * 4) + 7) /* eight clumps of 4 bytes with 7 inner spaces */ /* Take a string representing one line from a hex dump and converts the * text to binary data. We check the printed offset with the offset * we are passed to validate the record. We place the bytes in the buffer * at the specified offset. * * In the process, we're going to write all over the string. * * Returns TRUE if good hex dump, FALSE if bad. */ static gboolean parse_single_hex_dump_line(char* rec, guint8 *buf, guint byte_offset) { int pos, i; char *s; unsigned long value; guint16 word_value; /* Get the byte_offset directly from the record */ rec[4] = '\0'; s = rec; value = strtoul(s, NULL, 16); if (value != byte_offset) { return FALSE; } /* Go through the substring representing the values and: * 1. Replace any spaces with '0's * 2. 
Place \0's every 5 bytes (to terminate the string) * * Then read the eight sets of hex bytes */ for (pos = START_POS; pos < START_POS + HEX_LENGTH; pos++) { if (rec[pos] == ' ') { rec[pos] = '0'; } } pos = START_POS; for (i = 0; i < 8; i++) { rec[pos+4] = '\0'; word_value = (guint16) strtoul(&rec[pos], NULL, 16); buf[byte_offset + i * 2 + 0] = (guint8) (word_value >> 8); buf[byte_offset + i * 2 + 1] = (guint8) (word_value & 0x00ff); pos += 5; } return TRUE; } /* * Editor modelines - http://www.wireshark.org/tools/modelines.html * * Local variables: * c-basic-offset: 8 * tab-width: 8 * indent-tabs-mode: t * End: * * vi: set shiftwidth=8 tabstop=8 noexpandtab: * :indentSize=8:tabSize=8:noTabs=false: */
./CrossVul/dataset_final_sorted/CWE-20/c/good_5110_0
crossvul-cpp_data_good_3978_0
/* $OpenBSD: scp.c,v 1.210 2020/05/06 20:57:38 djm Exp $ */ /* * scp - secure remote copy. This is basically patched BSD rcp which * uses ssh to do the data transfer (instead of using rcmd). * * NOTE: This version should NOT be suid root. (This uses ssh to * do the transfer and ssh has the necessary privileges.) * * 1995 Timo Rinne <tri@iki.fi>, Tatu Ylonen <ylo@cs.hut.fi> * * As far as I am concerned, the code I have written for this software * can be used freely for any purpose. Any derived versions of this * software must be clearly marked as such, and if the derived work is * incompatible with the protocol description in the RFC file, it must be * called by a name other than "ssh" or "Secure Shell". */ /* * Copyright (c) 1999 Theo de Raadt. All rights reserved. * Copyright (c) 1999 Aaron Campbell. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Parts from: * * Copyright (c) 1983, 1990, 1992, 1993, 1995 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "includes.h" #include <sys/types.h> #ifdef HAVE_SYS_STAT_H # include <sys/stat.h> #endif #ifdef HAVE_POLL_H #include <poll.h> #else # ifdef HAVE_SYS_POLL_H # include <sys/poll.h> # endif #endif #ifdef HAVE_SYS_TIME_H # include <sys/time.h> #endif #include <sys/wait.h> #include <sys/uio.h> #include <ctype.h> #include <dirent.h> #include <errno.h> #include <fcntl.h> #ifdef HAVE_FNMATCH_H #include <fnmatch.h> #endif #include <limits.h> #include <locale.h> #include <pwd.h> #include <signal.h> #include <stdarg.h> #ifdef HAVE_STDINT_H # include <stdint.h> #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <unistd.h> #if defined(HAVE_STRNVIS) && defined(HAVE_VIS_H) && !defined(BROKEN_STRNVIS) #include <vis.h> #endif #include "xmalloc.h" #include "ssh.h" #include "atomicio.h" #include "pathnames.h" #include "log.h" #include "misc.h" #include "progressmeter.h" #include "utf8.h" extern char *__progname; #define COPY_BUFLEN 16384 int do_cmd(char *host, char *remuser, int port, char *cmd, int *fdin, int *fdout); int do_cmd2(char *host, char *remuser, int port, char *cmd, int fdin, int fdout); /* Struct for addargs */ arglist args; arglist remote_remote_args; /* Bandwidth limit */ long long limit_kbps = 0; struct bwlimit bwlimit; /* Name of current file being transferred. */ char *curfile; /* This is set to non-zero to enable verbose mode. 
*/ int verbose_mode = 0; /* This is set to zero if the progressmeter is not desired. */ int showprogress = 1; /* * This is set to non-zero if remote-remote copy should be piped * through this process. */ int throughlocal = 0; /* Non-standard port to use for the ssh connection or -1. */ int sshport = -1; /* This is the program to execute for the secured connection. ("ssh" or -S) */ char *ssh_program = _PATH_SSH_PROGRAM; /* This is used to store the pid of ssh_program */ pid_t do_cmd_pid = -1; static void killchild(int signo) { if (do_cmd_pid > 1) { kill(do_cmd_pid, signo ? signo : SIGTERM); waitpid(do_cmd_pid, NULL, 0); } if (signo) _exit(1); exit(1); } static void suspchild(int signo) { int status; if (do_cmd_pid > 1) { kill(do_cmd_pid, signo); while (waitpid(do_cmd_pid, &status, WUNTRACED) == -1 && errno == EINTR) ; kill(getpid(), SIGSTOP); } } static int do_local_cmd(arglist *a) { u_int i; int status; pid_t pid; if (a->num == 0) fatal("do_local_cmd: no arguments"); if (verbose_mode) { fprintf(stderr, "Executing:"); for (i = 0; i < a->num; i++) fmprintf(stderr, " %s", a->list[i]); fprintf(stderr, "\n"); } if ((pid = fork()) == -1) fatal("do_local_cmd: fork: %s", strerror(errno)); if (pid == 0) { execvp(a->list[0], a->list); perror(a->list[0]); exit(1); } do_cmd_pid = pid; ssh_signal(SIGTERM, killchild); ssh_signal(SIGINT, killchild); ssh_signal(SIGHUP, killchild); while (waitpid(pid, &status, 0) == -1) if (errno != EINTR) fatal("do_local_cmd: waitpid: %s", strerror(errno)); do_cmd_pid = -1; if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) return (-1); return (0); } /* * This function executes the given command as the specified user on the * given host. This returns < 0 if execution fails, and >= 0 otherwise. This * assigns the input and output file descriptors on success. 
*/ int do_cmd(char *host, char *remuser, int port, char *cmd, int *fdin, int *fdout) { int pin[2], pout[2], reserved[2]; if (verbose_mode) fmprintf(stderr, "Executing: program %s host %s, user %s, command %s\n", ssh_program, host, remuser ? remuser : "(unspecified)", cmd); if (port == -1) port = sshport; /* * Reserve two descriptors so that the real pipes won't get * descriptors 0 and 1 because that will screw up dup2 below. */ if (pipe(reserved) == -1) fatal("pipe: %s", strerror(errno)); /* Create a socket pair for communicating with ssh. */ if (pipe(pin) == -1) fatal("pipe: %s", strerror(errno)); if (pipe(pout) == -1) fatal("pipe: %s", strerror(errno)); /* Free the reserved descriptors. */ close(reserved[0]); close(reserved[1]); ssh_signal(SIGTSTP, suspchild); ssh_signal(SIGTTIN, suspchild); ssh_signal(SIGTTOU, suspchild); /* Fork a child to execute the command on the remote host using ssh. */ do_cmd_pid = fork(); if (do_cmd_pid == 0) { /* Child. */ close(pin[1]); close(pout[0]); dup2(pin[0], 0); dup2(pout[1], 1); close(pin[0]); close(pout[1]); replacearg(&args, 0, "%s", ssh_program); if (port != -1) { addargs(&args, "-p"); addargs(&args, "%d", port); } if (remuser != NULL) { addargs(&args, "-l"); addargs(&args, "%s", remuser); } addargs(&args, "--"); addargs(&args, "%s", host); addargs(&args, "%s", cmd); execvp(ssh_program, args.list); perror(ssh_program); exit(1); } else if (do_cmd_pid == -1) { fatal("fork: %s", strerror(errno)); } /* Parent. Close the other side, and return the local side. */ close(pin[0]); *fdout = pin[1]; close(pout[1]); *fdin = pout[0]; ssh_signal(SIGTERM, killchild); ssh_signal(SIGINT, killchild); ssh_signal(SIGHUP, killchild); return 0; } /* * This function executes a command similar to do_cmd(), but expects the * input and output descriptors to be setup by a previous call to do_cmd(). * This way the input and output of two commands can be connected. 
*/ int do_cmd2(char *host, char *remuser, int port, char *cmd, int fdin, int fdout) { pid_t pid; int status; if (verbose_mode) fmprintf(stderr, "Executing: 2nd program %s host %s, user %s, command %s\n", ssh_program, host, remuser ? remuser : "(unspecified)", cmd); if (port == -1) port = sshport; /* Fork a child to execute the command on the remote host using ssh. */ pid = fork(); if (pid == 0) { dup2(fdin, 0); dup2(fdout, 1); replacearg(&args, 0, "%s", ssh_program); if (port != -1) { addargs(&args, "-p"); addargs(&args, "%d", port); } if (remuser != NULL) { addargs(&args, "-l"); addargs(&args, "%s", remuser); } addargs(&args, "-oBatchMode=yes"); addargs(&args, "--"); addargs(&args, "%s", host); addargs(&args, "%s", cmd); execvp(ssh_program, args.list); perror(ssh_program); exit(1); } else if (pid == -1) { fatal("fork: %s", strerror(errno)); } while (waitpid(pid, &status, 0) == -1) if (errno != EINTR) fatal("do_cmd2: waitpid: %s", strerror(errno)); return 0; } typedef struct { size_t cnt; char *buf; } BUF; BUF *allocbuf(BUF *, int, int); void lostconn(int); int okname(char *); void run_err(const char *,...); int note_err(const char *,...); void verifydir(char *); struct passwd *pwd; uid_t userid; int errs, remin, remout; int Tflag, pflag, iamremote, iamrecursive, targetshouldbedirectory; #define CMDNEEDS 64 char cmd[CMDNEEDS]; /* must hold "rcp -r -p -d\0" */ int response(void); void rsource(char *, struct stat *); void sink(int, char *[], const char *); void source(int, char *[]); void tolocal(int, char *[]); void toremote(int, char *[]); void usage(void); int main(int argc, char **argv) { int ch, fflag, tflag, status, n; char **newargv; const char *errstr; extern char *optarg; extern int optind; /* Ensure that fds 0, 1 and 2 are open or directed to /dev/null */ sanitise_stdfd(); seed_rng(); msetlocale(); /* Copy argv, because we modify it */ newargv = xcalloc(MAXIMUM(argc + 1, 1), sizeof(*newargv)); for (n = 0; n < argc; n++) newargv[n] = xstrdup(argv[n]); argv = 
newargv; __progname = ssh_get_progname(argv[0]); memset(&args, '\0', sizeof(args)); memset(&remote_remote_args, '\0', sizeof(remote_remote_args)); args.list = remote_remote_args.list = NULL; addargs(&args, "%s", ssh_program); addargs(&args, "-x"); addargs(&args, "-oForwardAgent=no"); addargs(&args, "-oPermitLocalCommand=no"); addargs(&args, "-oClearAllForwardings=yes"); addargs(&args, "-oRemoteCommand=none"); addargs(&args, "-oRequestTTY=no"); fflag = Tflag = tflag = 0; while ((ch = getopt(argc, argv, "dfl:prtTvBCc:i:P:q12346S:o:F:J:")) != -1) { switch (ch) { /* User-visible flags. */ case '1': fatal("SSH protocol v.1 is no longer supported"); break; case '2': /* Ignored */ break; case '4': case '6': case 'C': addargs(&args, "-%c", ch); addargs(&remote_remote_args, "-%c", ch); break; case '3': throughlocal = 1; break; case 'o': case 'c': case 'i': case 'F': case 'J': addargs(&remote_remote_args, "-%c", ch); addargs(&remote_remote_args, "%s", optarg); addargs(&args, "-%c", ch); addargs(&args, "%s", optarg); break; case 'P': sshport = a2port(optarg); if (sshport <= 0) fatal("bad port \"%s\"\n", optarg); break; case 'B': addargs(&remote_remote_args, "-oBatchmode=yes"); addargs(&args, "-oBatchmode=yes"); break; case 'l': limit_kbps = strtonum(optarg, 1, 100 * 1024 * 1024, &errstr); if (errstr != NULL) usage(); limit_kbps *= 1024; /* kbps */ bandwidth_limit_init(&bwlimit, limit_kbps, COPY_BUFLEN); break; case 'p': pflag = 1; break; case 'r': iamrecursive = 1; break; case 'S': ssh_program = xstrdup(optarg); break; case 'v': addargs(&args, "-v"); addargs(&remote_remote_args, "-v"); verbose_mode = 1; break; case 'q': addargs(&args, "-q"); addargs(&remote_remote_args, "-q"); showprogress = 0; break; /* Server options. 
*/ case 'd': targetshouldbedirectory = 1; break; case 'f': /* "from" */ iamremote = 1; fflag = 1; break; case 't': /* "to" */ iamremote = 1; tflag = 1; #ifdef HAVE_CYGWIN setmode(0, O_BINARY); #endif break; case 'T': Tflag = 1; break; default: usage(); } } argc -= optind; argv += optind; if ((pwd = getpwuid(userid = getuid())) == NULL) fatal("unknown user %u", (u_int) userid); if (!isatty(STDOUT_FILENO)) showprogress = 0; if (pflag) { /* Cannot pledge: -p allows setuid/setgid files... */ } else { if (pledge("stdio rpath wpath cpath fattr tty proc exec", NULL) == -1) { perror("pledge"); exit(1); } } remin = STDIN_FILENO; remout = STDOUT_FILENO; if (fflag) { /* Follow "protocol", send data. */ (void) response(); source(argc, argv); exit(errs != 0); } if (tflag) { /* Receive data. */ sink(argc, argv, NULL); exit(errs != 0); } if (argc < 2) usage(); if (argc > 2) targetshouldbedirectory = 1; remin = remout = -1; do_cmd_pid = -1; /* Command to be executed on remote system using "ssh". */ (void) snprintf(cmd, sizeof cmd, "scp%s%s%s%s", verbose_mode ? " -v" : "", iamrecursive ? " -r" : "", pflag ? " -p" : "", targetshouldbedirectory ? " -d" : ""); (void) ssh_signal(SIGPIPE, lostconn); if (colon(argv[argc - 1])) /* Dest is remote host. */ toremote(argc, argv); else { if (targetshouldbedirectory) verifydir(argv[argc - 1]); tolocal(argc, argv); /* Dest is local host. 
*/ } /* * Finally check the exit status of the ssh process, if one was forked * and no error has occurred yet */ if (do_cmd_pid != -1 && errs == 0) { if (remin != -1) (void) close(remin); if (remout != -1) (void) close(remout); if (waitpid(do_cmd_pid, &status, 0) == -1) errs = 1; else { if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) errs = 1; } } exit(errs != 0); } /* Callback from atomicio6 to update progress meter and limit bandwidth */ static int scpio(void *_cnt, size_t s) { off_t *cnt = (off_t *)_cnt; *cnt += s; refresh_progress_meter(0); if (limit_kbps > 0) bandwidth_limit(&bwlimit, s); return 0; } static int do_times(int fd, int verb, const struct stat *sb) { /* strlen(2^64) == 20; strlen(10^6) == 7 */ char buf[(20 + 7 + 2) * 2 + 2]; (void)snprintf(buf, sizeof(buf), "T%llu 0 %llu 0\n", (unsigned long long) (sb->st_mtime < 0 ? 0 : sb->st_mtime), (unsigned long long) (sb->st_atime < 0 ? 0 : sb->st_atime)); if (verb) { fprintf(stderr, "File mtime %lld atime %lld\n", (long long)sb->st_mtime, (long long)sb->st_atime); fprintf(stderr, "Sending file timestamps: %s", buf); } (void) atomicio(vwrite, fd, buf, strlen(buf)); return (response()); } static int parse_scp_uri(const char *uri, char **userp, char **hostp, int *portp, char **pathp) { int r; r = parse_uri("scp", uri, userp, hostp, portp, pathp); if (r == 0 && *pathp == NULL) *pathp = xstrdup("."); return r; } /* Appends a string to an array; returns 0 on success, -1 on alloc failure */ static int append(char *cp, char ***ap, size_t *np) { char **tmp; if ((tmp = reallocarray(*ap, *np + 1, sizeof(*tmp))) == NULL) return -1; tmp[(*np)] = cp; (*np)++; *ap = tmp; return 0; } /* * Finds the start and end of the first brace pair in the pattern. * returns 0 on success or -1 for invalid patterns. 
 */
static int
find_brace(const char *pattern, int *startp, int *endp)
{
	int i;
	int in_bracket, brace_level;

	/*
	 * On success *startp/*endp hold the indices of the outermost '{'
	 * and its matching '}', or both stay -1 if no brace pair exists.
	 * Characters inside [...] bracket expressions and backslash-escaped
	 * characters are not treated as braces.
	 */
	*startp = *endp = -1;
	in_bracket = brace_level = 0;
	/* i < INT_MAX guards the int index against overflow on huge input */
	for (i = 0; i < INT_MAX && *endp < 0 && pattern[i] != '\0'; i++) {
		switch (pattern[i]) {
		case '\\':
			/* skip next character */
			if (pattern[i + 1] != '\0')
				i++;
			break;
		case '[':
			in_bracket = 1;
			break;
		case ']':
			in_bracket = 0;
			break;
		case '{':
			if (in_bracket)
				break;
			if (pattern[i + 1] == '}') {
				/* Protect a single {}, for find(1), like csh */
				i++;	/* skip */
				break;
			}
			/* only the first (outermost) '{' is recorded */
			if (*startp == -1)
				*startp = i;
			brace_level++;
			break;
		case '}':
			if (in_bracket)
				break;
			if (*startp < 0) {
				/* Unbalanced brace */
				return -1;
			}
			/* closing the outermost brace ends the search */
			if (--brace_level <= 0)
				*endp = i;
			break;
		}
	}

	/* unbalanced brackets/braces */
	if (*endp < 0 && (*startp >= 0 || in_bracket))
		return -1;

	return 0;
}

/*
 * Assembles and records a successfully-expanded pattern, returns -1 on
 * alloc failure.
 *
 * Builds head (pattern before brace_start) + the selected alternative
 * (pattern[sel_start..sel_end)) + tail (pattern after brace_end) into a
 * freshly-allocated string and appends it to *patternsp/*npatternsp.
 * Ownership of the new string passes to the array on success.
 */
static int
emit_expansion(const char *pattern, int brace_start, int brace_end,
    int sel_start, int sel_end, char ***patternsp, size_t *npatternsp)
{
	char *cp;
	int o = 0, tail_len = strlen(pattern + brace_end + 1);

	if ((cp = malloc(brace_start + (sel_end - sel_start) +
	    tail_len + 1)) == NULL)
		return -1;

	/* Pattern before initial brace */
	if (brace_start > 0) {
		memcpy(cp, pattern, brace_start);
		o = brace_start;
	}

	/* Current braced selection */
	if (sel_end - sel_start > 0) {
		memcpy(cp + o, pattern + sel_start,
		    sel_end - sel_start);
		o += sel_end - sel_start;
	}

	/* Remainder of pattern after closing brace */
	if (tail_len > 0) {
		memcpy(cp + o, pattern + brace_end + 1, tail_len);
		o += tail_len;
	}
	cp[o] = '\0';

	if (append(cp, patternsp, npatternsp) != 0) {
		free(cp);
		return -1;
	}
	return 0;
}

/*
 * Expand the first encountered brace in pattern, appending the expanded
 * patterns it yielded to the *patternsp array.
 *
 * Returns 0 on success or -1 on allocation failure.
 *
 * Signals whether expansion was performed via *expanded and whether
 * pattern was invalid via *invalid.
 */
static int
brace_expand_one(const char *pattern, char ***patternsp, size_t *npatternsp,
    int *expanded, int *invalid)
{
	int i;
	int in_bracket, brace_start, brace_end, brace_level;
	int sel_start, sel_end;

	*invalid = *expanded = 0;

	/* Locate the outermost brace pair; bail out if none or malformed */
	if (find_brace(pattern, &brace_start, &brace_end) != 0) {
		*invalid = 1;
		return 0;
	} else if (brace_start == -1)
		return 0;

	/*
	 * Walk the contents of the brace pair, splitting it into
	 * comma-separated alternatives.  Nested braces and bracket
	 * expressions suppress splitting so that e.g. {a,{b,c}} keeps
	 * "{b,c}" intact as a single alternative.
	 */
	in_bracket = brace_level = 0;
	for (i = sel_start = brace_start + 1; i < brace_end; i++) {
		switch (pattern[i]) {
		case '{':
			if (in_bracket)
				break;
			brace_level++;
			break;
		case '}':
			if (in_bracket)
				break;
			brace_level--;
			break;
		case '[':
			in_bracket = 1;
			break;
		case ']':
			in_bracket = 0;
			break;
		case '\\':
			if (i < brace_end - 1)
				i++;	/* skip */
			break;
		}
		if (pattern[i] == ',' || i == brace_end - 1) {
			if (in_bracket || brace_level > 0)
				continue;
			/* End of a selection, emit an expanded pattern */
			/* Adjust end index for last selection */
			sel_end = (i == brace_end - 1) ? brace_end : i;
			if (emit_expansion(pattern, brace_start, brace_end,
			    sel_start, sel_end, patternsp, npatternsp) != 0)
				return -1;
			/* move on to the next selection */
			sel_start = i + 1;
			continue;
		}
	}

	/* left the loop with an open bracket or brace: malformed pattern */
	if (in_bracket || brace_level > 0) {
		*invalid = 1;
		return 0;
	}

	/* success */
	*expanded = 1;
	return 0;
}

/* Expand braces from pattern.
Returns 0 on success, -1 on failure */ static int brace_expand(const char *pattern, char ***patternsp, size_t *npatternsp) { char *cp, *cp2, **active = NULL, **done = NULL; size_t i, nactive = 0, ndone = 0; int ret = -1, invalid = 0, expanded = 0; *patternsp = NULL; *npatternsp = 0; /* Start the worklist with the original pattern */ if ((cp = strdup(pattern)) == NULL) return -1; if (append(cp, &active, &nactive) != 0) { free(cp); return -1; } while (nactive > 0) { cp = active[nactive - 1]; nactive--; if (brace_expand_one(cp, &active, &nactive, &expanded, &invalid) == -1) { free(cp); goto fail; } if (invalid) fatal("%s: invalid brace pattern \"%s\"", __func__, cp); if (expanded) { /* * Current entry expanded to new entries on the * active list; discard the progenitor pattern. */ free(cp); continue; } /* * Pattern did not expand; append the finename component to * the completed list */ if ((cp2 = strrchr(cp, '/')) != NULL) *cp2++ = '\0'; else cp2 = cp; if (append(xstrdup(cp2), &done, &ndone) != 0) { free(cp); goto fail; } free(cp); } /* success */ *patternsp = done; *npatternsp = ndone; done = NULL; ndone = 0; ret = 0; fail: for (i = 0; i < nactive; i++) free(active[i]); free(active); for (i = 0; i < ndone; i++) free(done[i]); free(done); return ret; } void toremote(int argc, char **argv) { char *suser = NULL, *host = NULL, *src = NULL; char *bp, *tuser, *thost, *targ; int sport = -1, tport = -1; arglist alist; int i, r; u_int j; memset(&alist, '\0', sizeof(alist)); alist.list = NULL; /* Parse target */ r = parse_scp_uri(argv[argc - 1], &tuser, &thost, &tport, &targ); if (r == -1) { fmprintf(stderr, "%s: invalid uri\n", argv[argc - 1]); ++errs; goto out; } if (r != 0) { if (parse_user_host_path(argv[argc - 1], &tuser, &thost, &targ) == -1) { fmprintf(stderr, "%s: invalid target\n", argv[argc - 1]); ++errs; goto out; } } if (tuser != NULL && !okname(tuser)) { ++errs; goto out; } /* Parse source files */ for (i = 0; i < argc - 1; i++) { free(suser); free(host); 
free(src); r = parse_scp_uri(argv[i], &suser, &host, &sport, &src); if (r == -1) { fmprintf(stderr, "%s: invalid uri\n", argv[i]); ++errs; continue; } if (r != 0) { parse_user_host_path(argv[i], &suser, &host, &src); } if (suser != NULL && !okname(suser)) { ++errs; continue; } if (host && throughlocal) { /* extended remote to remote */ xasprintf(&bp, "%s -f %s%s", cmd, *src == '-' ? "-- " : "", src); if (do_cmd(host, suser, sport, bp, &remin, &remout) < 0) exit(1); free(bp); xasprintf(&bp, "%s -t %s%s", cmd, *targ == '-' ? "-- " : "", targ); if (do_cmd2(thost, tuser, tport, bp, remin, remout) < 0) exit(1); free(bp); (void) close(remin); (void) close(remout); remin = remout = -1; } else if (host) { /* standard remote to remote */ if (tport != -1 && tport != SSH_DEFAULT_PORT) { /* This would require the remote support URIs */ fatal("target port not supported with two " "remote hosts without the -3 option"); } freeargs(&alist); addargs(&alist, "%s", ssh_program); addargs(&alist, "-x"); addargs(&alist, "-oClearAllForwardings=yes"); addargs(&alist, "-n"); for (j = 0; j < remote_remote_args.num; j++) { addargs(&alist, "%s", remote_remote_args.list[j]); } if (sport != -1) { addargs(&alist, "-p"); addargs(&alist, "%d", sport); } if (suser) { addargs(&alist, "-l"); addargs(&alist, "%s", suser); } addargs(&alist, "--"); addargs(&alist, "%s", host); addargs(&alist, "%s", cmd); addargs(&alist, "%s", src); addargs(&alist, "%s%s%s:%s", tuser ? tuser : "", tuser ? "@" : "", thost, targ); if (do_local_cmd(&alist) != 0) errs = 1; } else { /* local to remote */ if (remin == -1) { xasprintf(&bp, "%s -t %s%s", cmd, *targ == '-' ? 
"-- " : "", targ); if (do_cmd(thost, tuser, tport, bp, &remin, &remout) < 0) exit(1); if (response() < 0) exit(1); free(bp); } source(1, argv + i); } } out: free(tuser); free(thost); free(targ); free(suser); free(host); free(src); } void tolocal(int argc, char **argv) { char *bp, *host = NULL, *src = NULL, *suser = NULL; arglist alist; int i, r, sport = -1; memset(&alist, '\0', sizeof(alist)); alist.list = NULL; for (i = 0; i < argc - 1; i++) { free(suser); free(host); free(src); r = parse_scp_uri(argv[i], &suser, &host, &sport, &src); if (r == -1) { fmprintf(stderr, "%s: invalid uri\n", argv[i]); ++errs; continue; } if (r != 0) parse_user_host_path(argv[i], &suser, &host, &src); if (suser != NULL && !okname(suser)) { ++errs; continue; } if (!host) { /* Local to local. */ freeargs(&alist); addargs(&alist, "%s", _PATH_CP); if (iamrecursive) addargs(&alist, "-r"); if (pflag) addargs(&alist, "-p"); addargs(&alist, "--"); addargs(&alist, "%s", argv[i]); addargs(&alist, "%s", argv[argc-1]); if (do_local_cmd(&alist)) ++errs; continue; } /* Remote to local. */ xasprintf(&bp, "%s -f %s%s", cmd, *src == '-' ? 
"-- " : "", src); if (do_cmd(host, suser, sport, bp, &remin, &remout) < 0) { free(bp); ++errs; continue; } free(bp); sink(1, argv + argc - 1, src); (void) close(remin); remin = remout = -1; } free(suser); free(host); free(src); } void source(int argc, char **argv) { struct stat stb; static BUF buffer; BUF *bp; off_t i, statbytes; size_t amt, nr; int fd = -1, haderr, indx; char *last, *name, buf[PATH_MAX + 128], encname[PATH_MAX]; int len; for (indx = 0; indx < argc; ++indx) { name = argv[indx]; statbytes = 0; len = strlen(name); while (len > 1 && name[len-1] == '/') name[--len] = '\0'; if ((fd = open(name, O_RDONLY|O_NONBLOCK, 0)) == -1) goto syserr; if (strchr(name, '\n') != NULL) { strnvis(encname, name, sizeof(encname), VIS_NL); name = encname; } if (fstat(fd, &stb) == -1) { syserr: run_err("%s: %s", name, strerror(errno)); goto next; } if (stb.st_size < 0) { run_err("%s: %s", name, "Negative file size"); goto next; } unset_nonblock(fd); switch (stb.st_mode & S_IFMT) { case S_IFREG: break; case S_IFDIR: if (iamrecursive) { rsource(name, &stb); goto next; } /* FALLTHROUGH */ default: run_err("%s: not a regular file", name); goto next; } if ((last = strrchr(name, '/')) == NULL) last = name; else ++last; curfile = last; if (pflag) { if (do_times(remout, verbose_mode, &stb) < 0) goto next; } #define FILEMODEMASK (S_ISUID|S_ISGID|S_IRWXU|S_IRWXG|S_IRWXO) snprintf(buf, sizeof buf, "C%04o %lld %s\n", (u_int) (stb.st_mode & FILEMODEMASK), (long long)stb.st_size, last); if (verbose_mode) fmprintf(stderr, "Sending file modes: %s", buf); (void) atomicio(vwrite, remout, buf, strlen(buf)); if (response() < 0) goto next; if ((bp = allocbuf(&buffer, fd, COPY_BUFLEN)) == NULL) { next: if (fd != -1) { (void) close(fd); fd = -1; } continue; } if (showprogress) start_progress_meter(curfile, stb.st_size, &statbytes); set_nonblock(remout); for (haderr = i = 0; i < stb.st_size; i += bp->cnt) { amt = bp->cnt; if (i + (off_t)amt > stb.st_size) amt = stb.st_size - i; if (!haderr) { if 
((nr = atomicio(read, fd, bp->buf, amt)) != amt) { haderr = errno; memset(bp->buf + nr, 0, amt - nr); } } /* Keep writing after error to retain sync */ if (haderr) { (void)atomicio(vwrite, remout, bp->buf, amt); memset(bp->buf, 0, amt); continue; } if (atomicio6(vwrite, remout, bp->buf, amt, scpio, &statbytes) != amt) haderr = errno; } unset_nonblock(remout); if (fd != -1) { if (close(fd) == -1 && !haderr) haderr = errno; fd = -1; } if (!haderr) (void) atomicio(vwrite, remout, "", 1); else run_err("%s: %s", name, strerror(haderr)); (void) response(); if (showprogress) stop_progress_meter(); } } void rsource(char *name, struct stat *statp) { DIR *dirp; struct dirent *dp; char *last, *vect[1], path[PATH_MAX]; if (!(dirp = opendir(name))) { run_err("%s: %s", name, strerror(errno)); return; } last = strrchr(name, '/'); if (last == NULL) last = name; else last++; if (pflag) { if (do_times(remout, verbose_mode, statp) < 0) { closedir(dirp); return; } } (void) snprintf(path, sizeof path, "D%04o %d %.1024s\n", (u_int) (statp->st_mode & FILEMODEMASK), 0, last); if (verbose_mode) fmprintf(stderr, "Entering directory: %s", path); (void) atomicio(vwrite, remout, path, strlen(path)); if (response() < 0) { closedir(dirp); return; } while ((dp = readdir(dirp)) != NULL) { if (dp->d_ino == 0) continue; if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, "..")) continue; if (strlen(name) + 1 + strlen(dp->d_name) >= sizeof(path) - 1) { run_err("%s/%s: name too long", name, dp->d_name); continue; } (void) snprintf(path, sizeof path, "%s/%s", name, dp->d_name); vect[0] = path; source(1, vect); } (void) closedir(dirp); (void) atomicio(vwrite, remout, "E\n", 2); (void) response(); } #define TYPE_OVERFLOW(type, val) \ ((sizeof(type) == 4 && (val) > INT32_MAX) || \ (sizeof(type) == 8 && (val) > INT64_MAX) || \ (sizeof(type) != 4 && sizeof(type) != 8)) void sink(int argc, char **argv, const char *src) { static BUF buffer; struct stat stb; BUF *bp; off_t i; size_t j, count; int amt, exists, 
first, ofd; mode_t mode, omode, mask; off_t size, statbytes; unsigned long long ull; int setimes, targisdir, wrerr; char ch, *cp, *np, *targ, *why, *vect[1], buf[2048], visbuf[2048]; char **patterns = NULL; size_t n, npatterns = 0; struct timeval tv[2]; #define atime tv[0] #define mtime tv[1] #define SCREWUP(str) { why = str; goto screwup; } if (TYPE_OVERFLOW(time_t, 0) || TYPE_OVERFLOW(off_t, 0)) SCREWUP("Unexpected off_t/time_t size"); setimes = targisdir = 0; mask = umask(0); if (!pflag) (void) umask(mask); if (argc != 1) { run_err("ambiguous target"); exit(1); } targ = *argv; if (targetshouldbedirectory) verifydir(targ); (void) atomicio(vwrite, remout, "", 1); if (stat(targ, &stb) == 0 && S_ISDIR(stb.st_mode)) targisdir = 1; if (src != NULL && !iamrecursive && !Tflag) { /* * Prepare to try to restrict incoming filenames to match * the requested destination file glob. */ if (brace_expand(src, &patterns, &npatterns) != 0) fatal("%s: could not expand pattern", __func__); } for (first = 1;; first = 0) { cp = buf; if (atomicio(read, remin, cp, 1) != 1) goto done; if (*cp++ == '\n') SCREWUP("unexpected <newline>"); do { if (atomicio(read, remin, &ch, sizeof(ch)) != sizeof(ch)) SCREWUP("lost connection"); *cp++ = ch; } while (cp < &buf[sizeof(buf) - 1] && ch != '\n'); *cp = 0; if (verbose_mode) fmprintf(stderr, "Sink: %s", buf); if (buf[0] == '\01' || buf[0] == '\02') { if (iamremote == 0) { (void) snmprintf(visbuf, sizeof(visbuf), NULL, "%s", buf + 1); (void) atomicio(vwrite, STDERR_FILENO, visbuf, strlen(visbuf)); } if (buf[0] == '\02') exit(1); ++errs; continue; } if (buf[0] == 'E') { (void) atomicio(vwrite, remout, "", 1); goto done; } if (ch == '\n') *--cp = 0; cp = buf; if (*cp == 'T') { setimes++; cp++; if (!isdigit((unsigned char)*cp)) SCREWUP("mtime.sec not present"); ull = strtoull(cp, &cp, 10); if (!cp || *cp++ != ' ') SCREWUP("mtime.sec not delimited"); if (TYPE_OVERFLOW(time_t, ull)) setimes = 0; /* out of range */ mtime.tv_sec = ull; mtime.tv_usec = 
strtol(cp, &cp, 10); if (!cp || *cp++ != ' ' || mtime.tv_usec < 0 || mtime.tv_usec > 999999) SCREWUP("mtime.usec not delimited"); if (!isdigit((unsigned char)*cp)) SCREWUP("atime.sec not present"); ull = strtoull(cp, &cp, 10); if (!cp || *cp++ != ' ') SCREWUP("atime.sec not delimited"); if (TYPE_OVERFLOW(time_t, ull)) setimes = 0; /* out of range */ atime.tv_sec = ull; atime.tv_usec = strtol(cp, &cp, 10); if (!cp || *cp++ != '\0' || atime.tv_usec < 0 || atime.tv_usec > 999999) SCREWUP("atime.usec not delimited"); (void) atomicio(vwrite, remout, "", 1); continue; } if (*cp != 'C' && *cp != 'D') { /* * Check for the case "rcp remote:foo\* local:bar". * In this case, the line "No match." can be returned * by the shell before the rcp command on the remote is * executed so the ^Aerror_message convention isn't * followed. */ if (first) { run_err("%s", cp); exit(1); } SCREWUP("expected control record"); } mode = 0; for (++cp; cp < buf + 5; cp++) { if (*cp < '0' || *cp > '7') SCREWUP("bad mode"); mode = (mode << 3) | (*cp - '0'); } if (!pflag) mode &= ~mask; if (*cp++ != ' ') SCREWUP("mode not delimited"); if (!isdigit((unsigned char)*cp)) SCREWUP("size not present"); ull = strtoull(cp, &cp, 10); if (!cp || *cp++ != ' ') SCREWUP("size not delimited"); if (TYPE_OVERFLOW(off_t, ull)) SCREWUP("size out of range"); size = (off_t)ull; if (*cp == '\0' || strchr(cp, '/') != NULL || strcmp(cp, ".") == 0 || strcmp(cp, "..") == 0) { run_err("error: unexpected filename: %s", cp); exit(1); } if (npatterns > 0) { for (n = 0; n < npatterns; n++) { if (fnmatch(patterns[n], cp, 0) == 0) break; } if (n >= npatterns) SCREWUP("filename does not match request"); } if (targisdir) { static char *namebuf; static size_t cursize; size_t need; need = strlen(targ) + strlen(cp) + 250; if (need > cursize) { free(namebuf); namebuf = xmalloc(need); cursize = need; } (void) snprintf(namebuf, need, "%s%s%s", targ, strcmp(targ, "/") ? 
"/" : "", cp); np = namebuf; } else np = targ; curfile = cp; exists = stat(np, &stb) == 0; if (buf[0] == 'D') { int mod_flag = pflag; if (!iamrecursive) SCREWUP("received directory without -r"); if (exists) { if (!S_ISDIR(stb.st_mode)) { errno = ENOTDIR; goto bad; } if (pflag) (void) chmod(np, mode); } else { /* Handle copying from a read-only directory */ mod_flag = 1; if (mkdir(np, mode | S_IRWXU) == -1) goto bad; } vect[0] = xstrdup(np); sink(1, vect, src); if (setimes) { setimes = 0; (void) utimes(vect[0], tv); } if (mod_flag) (void) chmod(vect[0], mode); free(vect[0]); continue; } omode = mode; mode |= S_IWUSR; if ((ofd = open(np, O_WRONLY|O_CREAT, mode)) == -1) { bad: run_err("%s: %s", np, strerror(errno)); continue; } (void) atomicio(vwrite, remout, "", 1); if ((bp = allocbuf(&buffer, ofd, COPY_BUFLEN)) == NULL) { (void) close(ofd); continue; } cp = bp->buf; wrerr = 0; /* * NB. do not use run_err() unless immediately followed by * exit() below as it may send a spurious reply that might * desyncronise us from the peer. Use note_err() instead. */ statbytes = 0; if (showprogress) start_progress_meter(curfile, size, &statbytes); set_nonblock(remin); for (count = i = 0; i < size; i += bp->cnt) { amt = bp->cnt; if (i + amt > size) amt = size - i; count += amt; do { j = atomicio6(read, remin, cp, amt, scpio, &statbytes); if (j == 0) { run_err("%s", j != EPIPE ? strerror(errno) : "dropped connection"); exit(1); } amt -= j; cp += j; } while (amt > 0); if (count == bp->cnt) { /* Keep reading so we stay sync'd up. 
*/ if (!wrerr) { if (atomicio(vwrite, ofd, bp->buf, count) != count) { note_err("%s: %s", np, strerror(errno)); wrerr = 1; } } count = 0; cp = bp->buf; } } unset_nonblock(remin); if (count != 0 && !wrerr && atomicio(vwrite, ofd, bp->buf, count) != count) { note_err("%s: %s", np, strerror(errno)); wrerr = 1; } if (!wrerr && (!exists || S_ISREG(stb.st_mode)) && ftruncate(ofd, size) != 0) note_err("%s: truncate: %s", np, strerror(errno)); if (pflag) { if (exists || omode != mode) #ifdef HAVE_FCHMOD if (fchmod(ofd, omode)) { #else /* HAVE_FCHMOD */ if (chmod(np, omode)) { #endif /* HAVE_FCHMOD */ note_err("%s: set mode: %s", np, strerror(errno)); } } else { if (!exists && omode != mode) #ifdef HAVE_FCHMOD if (fchmod(ofd, omode & ~mask)) { #else /* HAVE_FCHMOD */ if (chmod(np, omode & ~mask)) { #endif /* HAVE_FCHMOD */ note_err("%s: set mode: %s", np, strerror(errno)); } } if (close(ofd) == -1) note_err(np, "%s: close: %s", np, strerror(errno)); (void) response(); if (showprogress) stop_progress_meter(); if (setimes && !wrerr) { setimes = 0; if (utimes(np, tv) == -1) { note_err("%s: set times: %s", np, strerror(errno)); } } /* If no error was noted then signal success for this file */ if (note_err(NULL) == 0) (void) atomicio(vwrite, remout, "", 1); } done: for (n = 0; n < npatterns; n++) free(patterns[n]); free(patterns); return; screwup: for (n = 0; n < npatterns; n++) free(patterns[n]); free(patterns); run_err("protocol error: %s", why); exit(1); } int response(void) { char ch, *cp, resp, rbuf[2048], visbuf[2048]; if (atomicio(read, remin, &resp, sizeof(resp)) != sizeof(resp)) lostconn(0); cp = rbuf; switch (resp) { case 0: /* ok */ return (0); default: *cp++ = resp; /* FALLTHROUGH */ case 1: /* error, followed by error msg */ case 2: /* fatal error, "" */ do { if (atomicio(read, remin, &ch, sizeof(ch)) != sizeof(ch)) lostconn(0); *cp++ = ch; } while (cp < &rbuf[sizeof(rbuf) - 1] && ch != '\n'); if (!iamremote) { cp[-1] = '\0'; (void) snmprintf(visbuf, sizeof(visbuf), 
NULL, "%s\n", rbuf); (void) atomicio(vwrite, STDERR_FILENO, visbuf, strlen(visbuf)); } ++errs; if (resp == 1) return (-1); exit(1); } /* NOTREACHED */ } void usage(void) { (void) fprintf(stderr, "usage: scp [-346BCpqrTv] [-c cipher] [-F ssh_config] [-i identity_file]\n" " [-J destination] [-l limit] [-o ssh_option] [-P port]\n" " [-S program] source ... target\n"); exit(1); } void run_err(const char *fmt,...) { static FILE *fp; va_list ap; ++errs; if (fp != NULL || (remout != -1 && (fp = fdopen(remout, "w")))) { (void) fprintf(fp, "%c", 0x01); (void) fprintf(fp, "scp: "); va_start(ap, fmt); (void) vfprintf(fp, fmt, ap); va_end(ap); (void) fprintf(fp, "\n"); (void) fflush(fp); } if (!iamremote) { va_start(ap, fmt); vfmprintf(stderr, fmt, ap); va_end(ap); fprintf(stderr, "\n"); } } /* * Notes a sink error for sending at the end of a file transfer. Returns 0 if * no error has been noted or -1 otherwise. Use note_err(NULL) to flush * any active error at the end of the transfer. */ int note_err(const char *fmt, ...) 
{ static char *emsg; va_list ap; /* Replay any previously-noted error */ if (fmt == NULL) { if (emsg == NULL) return 0; run_err("%s", emsg); free(emsg); emsg = NULL; return -1; } errs++; /* Prefer first-noted error */ if (emsg != NULL) return -1; va_start(ap, fmt); vasnmprintf(&emsg, INT_MAX, NULL, fmt, ap); va_end(ap); return -1; } void verifydir(char *cp) { struct stat stb; if (!stat(cp, &stb)) { if (S_ISDIR(stb.st_mode)) return; errno = ENOTDIR; } run_err("%s: %s", cp, strerror(errno)); killchild(0); } int okname(char *cp0) { int c; char *cp; cp = cp0; do { c = (int)*cp; if (c & 0200) goto bad; if (!isalpha(c) && !isdigit((unsigned char)c)) { switch (c) { case '\'': case '"': case '`': case ' ': case '#': goto bad; default: break; } } } while (*++cp); return (1); bad: fmprintf(stderr, "%s: invalid user name\n", cp0); return (0); } BUF * allocbuf(BUF *bp, int fd, int blksize) { size_t size; #ifdef HAVE_STRUCT_STAT_ST_BLKSIZE struct stat stb; if (fstat(fd, &stb) == -1) { run_err("fstat: %s", strerror(errno)); return (0); } size = ROUNDUP(stb.st_blksize, blksize); if (size == 0) size = blksize; #else /* HAVE_STRUCT_STAT_ST_BLKSIZE */ size = blksize; #endif /* HAVE_STRUCT_STAT_ST_BLKSIZE */ if (bp->cnt >= size) return (bp); bp->buf = xrecallocarray(bp->buf, bp->cnt, size, 1); bp->cnt = size; return (bp); } void lostconn(int signo) { if (!iamremote) (void)write(STDERR_FILENO, "lost connection\n", 16); if (signo) _exit(1); else exit(1); }
./CrossVul/dataset_final_sorted/CWE-20/c/good_3978_0
crossvul-cpp_data_good_2789_0
/* mboxlist.c -- Mailbox list manipulation routines * * Copyright (c) 1994-2008 Carnegie Mellon University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. The name "Carnegie Mellon University" must not be used to * endorse or promote products derived from this software without * prior written permission. For permission or any legal * details, please contact * Carnegie Mellon University * Center for Technology Transfer and Enterprise Creation * 4615 Forbes Avenue * Suite 302 * Pittsburgh, PA 15213 * (412) 268-7393, fax: (412) 268-7395 * innovation@andrew.cmu.edu * * 4. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by Computing Services * at Carnegie Mellon University (http://www.cmu.edu/computing/)." * * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include <config.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <sys/types.h> #include <sys/stat.h> #include <sys/uio.h> #include <fcntl.h> #include <syslog.h> #include <sys/ipc.h> #include <sys/msg.h> #include "acl.h" #include "annotate.h" #include "glob.h" #include "assert.h" #include "global.h" #include "cyrusdb.h" #include "util.h" #include "mailbox.h" #include "mboxevent.h" #include "exitcodes.h" #include "xmalloc.h" #include "xstrlcpy.h" #include "partlist.h" #include "xstrlcat.h" #include "user.h" /* generated headers are not necessarily in current directory */ #include "imap/imap_err.h" #include "mboxname.h" #include "mupdate-client.h" #include "mboxlist.h" #include "quota.h" #include "sync_log.h" #define DB config_mboxlist_db #define SUBDB config_subscription_db cyrus_acl_canonproc_t mboxlist_ensureOwnerRights; static struct db *mbdb; static int mboxlist_dbopen = 0; static int mboxlist_opensubs(const char *userid, struct db **ret); static void mboxlist_closesubs(struct db *sub); static int mboxlist_rmquota(const mbentry_t *mbentry, void *rock); static int mboxlist_changequota(const mbentry_t *mbentry, void *rock); EXPORTED mbentry_t *mboxlist_entry_create(void) { mbentry_t *ret = xzmalloc(sizeof(mbentry_t)); /* xxx - initialiser functions here? 
*/ return ret; } EXPORTED mbentry_t *mboxlist_entry_copy(const mbentry_t *src) { mbentry_t *copy = mboxlist_entry_create(); copy->name = xstrdupnull(src->name); copy->ext_name = xstrdupnull(src->ext_name); copy->mtime = src->mtime; copy->uidvalidity = src->uidvalidity; copy->mbtype = src->mbtype; copy->foldermodseq = src->foldermodseq; copy->partition = xstrdupnull(src->partition); copy->server = xstrdupnull(src->server); copy->acl = xstrdupnull(src->acl); copy->uniqueid = xstrdupnull(src->uniqueid); copy->legacy_specialuse = xstrdupnull(src->legacy_specialuse); return copy; } EXPORTED void mboxlist_entry_free(mbentry_t **mbentryptr) { mbentry_t *mbentry = *mbentryptr; /* idempotent */ if (!mbentry) return; free(mbentry->name); free(mbentry->ext_name); free(mbentry->partition); free(mbentry->server); free(mbentry->acl); free(mbentry->uniqueid); free(mbentry->legacy_specialuse); free(mbentry); *mbentryptr = NULL; } static void _write_acl(struct dlist *dl, const char *aclstr) { const char *p, *q; struct dlist *al = dlist_newkvlist(dl, "A"); p = aclstr; while (p && *p) { char *name,*val; q = strchr(p, '\t'); if (!q) break; name = xstrndup(p, q-p); q++; p = strchr(q, '\t'); if (p) { val = xstrndup(q, p-q); p++; } else val = xstrdup(q); dlist_setatom(al, name, val); free(name); free(val); } } EXPORTED const char *mboxlist_mbtype_to_string(uint32_t mbtype) { static struct buf buf = BUF_INITIALIZER; buf_reset(&buf); if (mbtype & MBTYPE_DELETED) buf_putc(&buf, 'd'); if (mbtype & MBTYPE_MOVING) buf_putc(&buf, 'm'); if (mbtype & MBTYPE_NETNEWS) buf_putc(&buf, 'n'); if (mbtype & MBTYPE_REMOTE) buf_putc(&buf, 'r'); if (mbtype & MBTYPE_RESERVE) buf_putc(&buf, 'z'); if (mbtype & MBTYPE_CALENDAR) buf_putc(&buf, 'c'); if (mbtype & MBTYPE_COLLECTION) buf_putc(&buf, 'b'); if (mbtype & MBTYPE_ADDRESSBOOK) buf_putc(&buf, 'a'); return buf_cstring(&buf); } static char *mboxlist_entry_cstring(const mbentry_t *mbentry) { struct buf buf = BUF_INITIALIZER; struct dlist *dl = 
dlist_newkvlist(NULL, mbentry->name); if (mbentry->acl) _write_acl(dl, mbentry->acl); if (mbentry->uniqueid) dlist_setatom(dl, "I", mbentry->uniqueid); if (mbentry->partition) dlist_setatom(dl, "P", mbentry->partition); if (mbentry->server) dlist_setatom(dl, "S", mbentry->server); if (mbentry->mbtype) dlist_setatom(dl, "T", mboxlist_mbtype_to_string(mbentry->mbtype)); if (mbentry->uidvalidity) dlist_setnum32(dl, "V", mbentry->uidvalidity); if (mbentry->foldermodseq) dlist_setnum64(dl, "F", mbentry->foldermodseq); dlist_setdate(dl, "M", time(NULL)); dlist_printbuf(dl, 0, &buf); dlist_free(&dl); return buf_release(&buf); } EXPORTED char *mbentry_metapath(const struct mboxlist_entry *mbentry, int metatype, int isnew) { return mboxname_metapath(mbentry->partition, mbentry->name, mbentry->uniqueid, metatype, isnew); } EXPORTED char *mbentry_datapath(const struct mboxlist_entry *mbentry, uint32_t uid) { return mboxname_datapath(mbentry->partition, mbentry->name, mbentry->uniqueid, uid); } /* * read a single record from the mailboxes.db and return a pointer to it */ static int mboxlist_read(const char *name, const char **dataptr, size_t *datalenptr, struct txn **tid, int wrlock) { int namelen = strlen(name); int r; if (!namelen) return IMAP_MAILBOX_NONEXISTENT; if (wrlock) { r = cyrusdb_fetchlock(mbdb, name, namelen, dataptr, datalenptr, tid); } else { r = cyrusdb_fetch(mbdb, name, namelen, dataptr, datalenptr, tid); } switch (r) { case CYRUSDB_OK: /* no entry required, just checking if it exists */ return 0; break; case CYRUSDB_AGAIN: return IMAP_AGAIN; break; case CYRUSDB_NOTFOUND: return IMAP_MAILBOX_NONEXISTENT; break; default: syslog(LOG_ERR, "DBERROR: error fetching mboxlist %s: %s", name, cyrusdb_strerror(r)); return IMAP_IOERROR; break; } /* never get here */ } EXPORTED uint32_t mboxlist_string_to_mbtype(const char *string) { uint32_t mbtype = 0; if (!string) return 0; /* null just means default */ for (; *string; string++) { switch (*string) { case 'a': mbtype |= 
MBTYPE_ADDRESSBOOK; break; case 'b': mbtype |= MBTYPE_COLLECTION; break; case 'c': mbtype |= MBTYPE_CALENDAR; break; case 'd': mbtype |= MBTYPE_DELETED; break; case 'm': mbtype |= MBTYPE_MOVING; break; case 'n': mbtype |= MBTYPE_NETNEWS; break; case 'r': mbtype |= MBTYPE_REMOTE; break; case 'z': mbtype |= MBTYPE_RESERVE; break; } } return mbtype; } struct parseentry_rock { struct mboxlist_entry *mbentry; struct buf *aclbuf; int doingacl; }; int parseentry_cb(int type, struct dlistsax_data *d) { struct parseentry_rock *rock = (struct parseentry_rock *)d->rock; switch(type) { case DLISTSAX_KVLISTSTART: if (!strcmp(buf_cstring(&d->kbuf), "A")) { rock->doingacl = 1; } break; case DLISTSAX_KVLISTEND: rock->doingacl = 0; break; case DLISTSAX_STRING: if (rock->doingacl) { buf_append(rock->aclbuf, &d->kbuf); buf_putc(rock->aclbuf, '\t'); buf_append(rock->aclbuf, &d->buf); buf_putc(rock->aclbuf, '\t'); } else { const char *key = buf_cstring(&d->kbuf); if (!strcmp(key, "F")) { rock->mbentry->foldermodseq = atoll(buf_cstring(&d->buf)); } else if (!strcmp(key, "I")) { rock->mbentry->uniqueid = buf_newcstring(&d->buf); } else if (!strcmp(key, "M")) { rock->mbentry->mtime = atoi(buf_cstring(&d->buf)); } else if (!strcmp(key, "P")) { rock->mbentry->partition = buf_newcstring(&d->buf); } else if (!strcmp(key, "S")) { rock->mbentry->server = buf_newcstring(&d->buf); } else if (!strcmp(key, "T")) { rock->mbentry->mbtype = mboxlist_string_to_mbtype(buf_cstring(&d->buf)); } else if (!strcmp(key, "V")) { rock->mbentry->uidvalidity = atol(buf_cstring(&d->buf)); } } } return 0; } /* * parse a record read from the mailboxes.db into its parts. 
* * full dlist format is: * A: _a_cl * I: unique_i_d * M: _m_time * P: _p_artition * S: _s_erver * T: _t_ype * V: uid_v_alidity */ EXPORTED int mboxlist_parse_entry(mbentry_t **mbentryptr, const char *name, size_t namelen, const char *data, size_t datalen) { static struct buf aclbuf; int r = IMAP_MAILBOX_BADFORMAT; char *freeme = NULL; char **target; char *p, *q; mbentry_t *mbentry = mboxlist_entry_create(); if (!datalen) goto done; /* copy name */ if (namelen) mbentry->name = xstrndup(name, namelen); else mbentry->name = xstrdup(name); /* check for DLIST mboxlist */ if (*data == '%') { struct parseentry_rock rock; memset(&rock, 0, sizeof(struct parseentry_rock)); rock.mbentry = mbentry; rock.aclbuf = &aclbuf; aclbuf.len = 0; r = dlist_parsesax(data, datalen, 0, parseentry_cb, &rock); if (!r) mbentry->acl = buf_newcstring(&aclbuf); goto done; } /* copy data */ freeme = p = xstrndup(data, datalen); /* check for extended mboxlist entry */ if (*p == '(') { int last = 0; p++; /* past leading '(' */ while (!last) { target = NULL; q = p; while (*q && *q != ' ' && *q != ')') q++; if (*q != ' ') break; *q++ = '\0'; if (!strcmp(p, "uniqueid")) target = &mbentry->uniqueid; if (!strcmp(p, "specialuse")) target = &mbentry->legacy_specialuse; p = q; while (*q && *q != ' ' && *q != ')') q++; if (*q != ' ') last = 1; if (*q) *q++ = '\0'; if (target) *target = xstrdup(p); p = q; } if (*p == ' ') p++; /* past trailing ' ' */ } /* copy out interesting parts */ mbentry->mbtype = strtol(p, &p, 10); if (*p == ' ') p++; q = p; while (*q && *q != ' ' && *q != '!') q++; if (*q == '!') { *q++ = '\0'; mbentry->server = xstrdup(p); p = q; while (*q && *q != ' ') q++; } if (*q) *q++ = '\0'; mbentry->partition = xstrdup(p); mbentry->acl = xstrdup(q); r = 0; done: if (!r && mbentryptr) *mbentryptr = mbentry; else mboxlist_entry_free(&mbentry); free(freeme); return r; } /* read a record and parse into parts */ static int mboxlist_mylookup(const char *name, mbentry_t **mbentryptr, struct txn 
**tid, int wrlock) { int r; const char *data; size_t datalen; r = mboxlist_read(name, &data, &datalen, tid, wrlock); if (r) return r; return mboxlist_parse_entry(mbentryptr, name, 0, data, datalen); } /* * Lookup 'name' in the mailbox list, ignoring reserved records */ EXPORTED int mboxlist_lookup(const char *name, mbentry_t **entryptr, struct txn **tid) { mbentry_t *entry = NULL; int r; r = mboxlist_mylookup(name, &entry, tid, 0); if (r) return r; /* Ignore "reserved" entries, like they aren't there */ if (entry->mbtype & MBTYPE_RESERVE) { mboxlist_entry_free(&entry); return IMAP_MAILBOX_RESERVED; } /* Ignore "deleted" entries, like they aren't there */ if (entry->mbtype & MBTYPE_DELETED) { mboxlist_entry_free(&entry); return IMAP_MAILBOX_NONEXISTENT; } if (entryptr) *entryptr = entry; else mboxlist_entry_free(&entry); return 0; } EXPORTED int mboxlist_lookup_allow_all(const char *name, mbentry_t **entryptr, struct txn **tid) { return mboxlist_mylookup(name, entryptr, tid, 0); } struct _find_specialuse_data { const char *use; const char *userid; char *mboxname; }; static int _find_specialuse(const mbentry_t *mbentry, void *rock) { struct _find_specialuse_data *d = (struct _find_specialuse_data *)rock; struct buf attrib = BUF_INITIALIZER; annotatemore_lookup(mbentry->name, "/specialuse", d->userid, &attrib); if (attrib.len) { strarray_t *uses = strarray_split(buf_cstring(&attrib), " ", 0); if (strarray_find_case(uses, d->use, 0) >= 0) d->mboxname = xstrdup(mbentry->name); strarray_free(uses); } buf_free(&attrib); if (d->mboxname) return CYRUSDB_DONE; return 0; } EXPORTED char *mboxlist_find_specialuse(const char *use, const char *userid) { /* \\Inbox is magical */ if (!strcasecmp(use, "\\Inbox")) return mboxname_user_mbox(userid, NULL); struct _find_specialuse_data rock = { use, userid, NULL }; mboxlist_usermboxtree(userid, _find_specialuse, &rock, MBOXTREE_SKIP_ROOT); return rock.mboxname; } struct _find_uniqueid_data { const char *uniqueid; char *mboxname; }; 
static int _find_uniqueid(const mbentry_t *mbentry, void *rock) { struct _find_uniqueid_data *d = (struct _find_uniqueid_data *) rock; int r = 0; if (!strcmp(d->uniqueid, mbentry->uniqueid)) { d->mboxname = xstrdup(mbentry->name); r = CYRUSDB_DONE; } return r; } EXPORTED char *mboxlist_find_uniqueid(const char *uniqueid, const char *userid) { struct _find_uniqueid_data rock = { uniqueid, NULL }; mboxlist_usermboxtree(userid, _find_uniqueid, &rock, MBOXTREE_PLUS_RACL); return rock.mboxname; } /* given a mailbox name, find the staging directory. XXX - this should * require more locking, and staging directories should be by pid */ HIDDEN int mboxlist_findstage(const char *name, char *stagedir, size_t sd_len) { const char *root; mbentry_t *mbentry = NULL; int r; assert(stagedir != NULL); /* Find mailbox */ r = mboxlist_lookup(name, &mbentry, NULL); if (r) return r; root = config_partitiondir(mbentry->partition); mboxlist_entry_free(&mbentry); if (!root) return IMAP_PARTITION_UNKNOWN; snprintf(stagedir, sd_len, "%s/stage./", root); return 0; } static void mboxlist_racl_key(int isuser, const char *keyuser, const char *mbname, struct buf *buf) { buf_setcstr(buf, "$RACL$"); buf_putc(buf, isuser ? 
'U' : 'S'); buf_putc(buf, '$'); if (keyuser) { buf_appendcstr(buf, keyuser); buf_putc(buf, '$'); } if (mbname) { buf_appendcstr(buf, mbname); } } static int user_is_in(const strarray_t *aclbits, const char *user) { int i; if (!aclbits) return 0; for (i = 0; i+1 < strarray_size(aclbits); i+=2) { if (!strcmp(strarray_nth(aclbits, i), user)) return 1; } return 0; } static int mboxlist_update_racl(const char *name, const mbentry_t *oldmbentry, const mbentry_t *newmbentry, struct txn **txn) { static strarray_t *admins = NULL; struct buf buf = BUF_INITIALIZER; char *userid = mboxname_to_userid(name); strarray_t *oldusers = NULL; strarray_t *newusers = NULL; int i; int r = 0; if (!admins) admins = strarray_split(config_getstring(IMAPOPT_ADMINS), NULL, 0); if (oldmbentry && oldmbentry->mbtype != MBTYPE_DELETED) oldusers = strarray_split(oldmbentry->acl, "\t", 0); if (newmbentry && newmbentry->mbtype != MBTYPE_DELETED) newusers = strarray_split(newmbentry->acl, "\t", 0); if (oldusers) { for (i = 0; i+1 < strarray_size(oldusers); i+=2) { const char *acluser = strarray_nth(oldusers, i); const char *aclval = strarray_nth(oldusers, i+1); if (!strchr(aclval, 'l')) continue; /* non-lookup ACLs can be skipped */ if (!strcmpsafe(userid, acluser)) continue; if (strarray_find(admins, acluser, 0) >= 0) continue; if (user_is_in(newusers, acluser)) continue; mboxlist_racl_key(!!userid, acluser, name, &buf); r = cyrusdb_delete(mbdb, buf.s, buf.len, txn, /*force*/1); if (r) goto done; } } if (newusers) { for (i = 0; i+1 < strarray_size(newusers); i+=2) { const char *acluser = strarray_nth(newusers, i); const char *aclval = strarray_nth(newusers, i+1); if (!strchr(aclval, 'l')) continue; /* non-lookup ACLs can be skipped */ if (!strcmpsafe(userid, acluser)) continue; if (strarray_find(admins, acluser, 0) >= 0) continue; if (user_is_in(oldusers, acluser)) continue; mboxlist_racl_key(!!userid, acluser, name, &buf); r = cyrusdb_store(mbdb, buf.s, buf.len, "", 0, txn); if (r) goto done; } } 
done: strarray_free(oldusers); strarray_free(newusers); free(userid); buf_free(&buf); return r; } static int mboxlist_update_entry(const char *name, const mbentry_t *mbentry, struct txn **txn) { mbentry_t *old = NULL; int r = 0; mboxlist_mylookup(name, &old, txn, 0); // ignore errors, it will be NULL if (!cyrusdb_fetch(mbdb, "$RACL", 5, NULL, NULL, txn)) { r = mboxlist_update_racl(name, old, mbentry, txn); /* XXX return value here is discarded? */ } if (mbentry) { char *mboxent = mboxlist_entry_cstring(mbentry); r = cyrusdb_store(mbdb, name, strlen(name), mboxent, strlen(mboxent), txn); free(mboxent); if (!r && config_auditlog) { /* XXX is there a difference between "" and NULL? */ if (old && strcmpsafe(old->acl, mbentry->acl)) { syslog(LOG_NOTICE, "auditlog: acl sessionid=<%s> " "mailbox=<%s> uniqueid=<%s> " "oldacl=<%s> acl=<%s>", session_id(), name, mbentry->uniqueid, old->acl, mbentry->acl); } } } else { r = cyrusdb_delete(mbdb, name, strlen(name), txn, /*force*/1); } mboxlist_entry_free(&old); return r; } EXPORTED int mboxlist_delete(const char *name) { return mboxlist_update_entry(name, NULL, NULL); } EXPORTED int mboxlist_update(mbentry_t *mbentry, int localonly) { int r = 0, r2 = 0; struct txn *tid = NULL; r = mboxlist_update_entry(mbentry->name, mbentry, &tid); if (!r) mboxname_setmodseq(mbentry->name, mbentry->foldermodseq, mbentry->mbtype, /*dofolder*/1); /* commit the change to mupdate */ if (!r && !localonly && config_mupdate_server) { mupdate_handle *mupdate_h = NULL; r = mupdate_connect(config_mupdate_server, NULL, &mupdate_h, NULL); if (r) { syslog(LOG_ERR, "cannot connect to mupdate server for update of '%s'", mbentry->name); } else { char *location = strconcat(config_servername, "!", mbentry->partition, (char *)NULL); r = mupdate_activate(mupdate_h, mbentry->name, location, mbentry->acl); free(location); if (r) { syslog(LOG_ERR, "MUPDATE: can't update mailbox entry for '%s'", mbentry->name); } } mupdate_disconnect(&mupdate_h); } if (tid) { if (r) 
{ r2 = cyrusdb_abort(mbdb, tid); } else { r2 = cyrusdb_commit(mbdb, tid); } } if (r2) { syslog(LOG_ERR, "DBERROR: error %s txn in mboxlist_update: %s", r ? "aborting" : "commiting", cyrusdb_strerror(r2)); } return r; } EXPORTED int mboxlist_findparent(const char *mboxname, mbentry_t **mbentryp) { mbentry_t *mbentry = NULL; mbname_t *mbname = mbname_from_intname(mboxname); int r = IMAP_MAILBOX_NONEXISTENT; while (strarray_size(mbname_boxes(mbname))) { free(mbname_pop_boxes(mbname)); mboxlist_entry_free(&mbentry); r = mboxlist_lookup(mbname_intname(mbname), &mbentry, NULL); if (r != IMAP_MAILBOX_NONEXISTENT) break; } if (r) mboxlist_entry_free(&mbentry); else *mbentryp = mbentry; mbname_free(&mbname); return r; } static int mboxlist_create_partition(const char *mboxname, const char *part, char **out) { mbentry_t *parent = NULL; if (!part) { int r = mboxlist_findparent(mboxname, &parent); if (!r) part = parent->partition; } /* use defaultpartition if specified */ if (!part && config_defpartition) part = config_defpartition; /* look for most fitting partition */ if (!part) part = partlist_local_select(); /* Configuration error */ if (!part || (strlen(part) > MAX_PARTITION_LEN)) goto err; if (!config_partitiondir(part)) goto err; *out = xstrdupnull(part); mboxlist_entry_free(&parent); return 0; err: mboxlist_entry_free(&parent); return IMAP_PARTITION_UNKNOWN; } /* * Check if a mailbox can be created. There is no other setup at this * stage, just the check! */ static int mboxlist_create_namecheck(const char *mboxname, const char *userid, const struct auth_state *auth_state, int isadmin, int force_subdirs) { mbentry_t *mbentry = NULL; int r = 0; /* policy first */ r = mboxname_policycheck(mboxname); if (r) goto done; /* is this the user's INBOX namespace? 
 */
    if (!isadmin && mboxname_userownsmailbox(userid, mboxname)) {
        /* User has admin rights over their own mailbox namespace */
        if (config_implicitrights & ACL_ADMIN)
            isadmin = 1;
    }

    /* Check to see if mailbox already exists */
    r = mboxlist_lookup(mboxname, &mbentry, NULL);
    if (r != IMAP_MAILBOX_NONEXISTENT) {
        if (!r) {
            r = IMAP_MAILBOX_EXISTS;

            /* Lie about error if privacy demands */
            if (!isadmin &&
                !(cyrus_acl_myrights(auth_state, mbentry->acl) & ACL_LOOKUP)) {
                r = IMAP_PERMISSION_DENIED;
            }
        }

        goto done;
    }
    mboxlist_entry_free(&mbentry);

    /* look for a parent mailbox */
    r = mboxlist_findparent(mboxname, &mbentry);
    if (r == 0) {
        /* found a parent */
        char root[MAX_MAILBOX_NAME+1];

        /* check acl */
        if (!isadmin &&
            !(cyrus_acl_myrights(auth_state, mbentry->acl) & ACL_CREATE)) {
            r = IMAP_PERMISSION_DENIED;
            goto done;
        }

        /* check quota */
        if (quota_findroot(root, sizeof(root), mboxname)) {
            quota_t qdiffs[QUOTA_NUMRESOURCES] = QUOTA_DIFFS_DONTCARE_INITIALIZER;
            qdiffs[QUOTA_NUMFOLDERS] = 1;
            r = quota_check_useds(root, qdiffs);
            if (r) goto done;
        }
    }
    else if (r == IMAP_MAILBOX_NONEXISTENT) {
        /* no parent mailbox */
        if (!isadmin) {
            r = IMAP_PERMISSION_DENIED;
            goto done;
        }

        if (!force_subdirs) {
            mbname_t *mbname = mbname_from_intname(mboxname);
            if (!mbname_isdeleted(mbname) && mbname_userid(mbname) && strarray_size(mbname_boxes(mbname))) {
                /* Disallow creating user.X.* when no user.X */
                r = IMAP_PERMISSION_DENIED;
                goto done;
            }
            mbname_free(&mbname);
        }

        /* otherwise no parent is OK */
        r = 0;
    }

done:
    mboxlist_entry_free(&mbentry);

    return r;
}

/* Build the initial ACL for a new mailbox: inherit from the parent if
 * one exists, else grant the owner ACL_ALL, else apply the configured
 * defaultacl.  *out is heap-allocated; caller frees. */
static int mboxlist_create_acl(const char *mboxname, char **out)
{
    mbentry_t *mbentry = NULL;
    int r;
    int mask;

    char *defaultacl;
    char *identifier;
    char *rights;
    char *p;

    r = mboxlist_findparent(mboxname, &mbentry);
    if (!r) {
        /* inherit the parent's ACL verbatim */
        *out = xstrdup(mbentry->acl);
        mboxlist_entry_free(&mbentry);
        return 0;
    }

    *out = xstrdup("");
    char *owner = mboxname_to_userid(mboxname);
    if (owner) {
        /* owner gets full permission on own mailbox by default */
        cyrus_acl_set(out, owner, ACL_MODE_SET, ACL_ALL,
                      (cyrus_acl_canonproc_t *)0, (void *)0);
        free(owner);
        return 0;
    }

    /* parse the "identifier rights identifier rights ..." defaultacl */
    defaultacl = identifier = xstrdup(config_getstring(IMAPOPT_DEFAULTACL));
    for (;;) {
        while (*identifier && Uisspace(*identifier)) identifier++;
        rights = identifier;
        while (*rights && !Uisspace(*rights)) rights++;
        if (!*rights) break;
        *rights++ = '\0';
        while (*rights && Uisspace(*rights)) rights++;
        if (!*rights) break;
        p = rights;
        while (*p && !Uisspace(*p)) p++;
        if (*p) *p++ = '\0';
        cyrus_acl_strtomask(rights, &mask);
        /* XXX and if strtomask fails? */
        cyrus_acl_set(out, identifier, ACL_MODE_SET, mask,
                      (cyrus_acl_canonproc_t *)0, (void *)0);
        identifier = p;
    }
    free(defaultacl);

    return 0;
}

/* and this API just plain sucks */
EXPORTED int mboxlist_createmailboxcheck(const char *name, int mbtype __attribute__((unused)),
                                         const char *partition,
                                         int isadmin, const char *userid,
                                         const struct auth_state *auth_state,
                                         char **newacl, char **newpartition,
                                         int forceuser)
{
    char *part = NULL;
    char *acl = NULL;
    int r = 0;

    r = mboxlist_create_namecheck(name, userid, auth_state,
                                  isadmin, forceuser);
    if (r) goto done;

    if (newacl) {
        r = mboxlist_create_acl(name, &acl);
        if (r) goto done;
    }

    if (newpartition) {
        r = mboxlist_create_partition(name, partition, &part);
        if (r) goto done;
    }

done:
    /* on error (or when not requested) free; otherwise transfer ownership */
    if (r || !newacl) free(acl);
    else *newacl = acl;

    if (r || !newpartition) free(part);
    else *newpartition = part;

    return r;
}

/*
 * Create a mailbox
 *
 * 1. verify ACL's to best of ability (CRASH: abort)
 * 2. verify parent ACL's if need to
 * 3. create the local mailbox locally (exclusive lock) and keep it locked
 * 4. open mupdate connection if necessary
 * 5. create mupdate entry (CRASH: mupdate inconsistant)
 *
 */
static int mboxlist_createmailbox_full(const char *mboxname, int mbtype,
                                       const char *partition,
                                       int isadmin, const char *userid,
                                       const struct auth_state *auth_state,
                                       int options, unsigned uidvalidity,
                                       modseq_t highestmodseq,
                                       const char *copyacl, const char *uniqueid,
                                       int localonly, int forceuser, int dbonly,
                                       struct mailbox **mboxptr)
{
    int r;
    char *newpartition = NULL;
    char *acl = NULL;
    struct mailbox *newmailbox = NULL;
    int isremote = mbtype & MBTYPE_REMOTE;
    mbentry_t *newmbentry = NULL;

    r = mboxlist_create_namecheck(mboxname, userid, auth_state,
                                  isadmin, forceuser);
    if (r) goto done;

    if (copyacl) {
        acl = xstrdup(copyacl);
    }
    else {
        r = mboxlist_create_acl(mboxname, &acl);
        if (r) goto done;
    }

    r = mboxlist_create_partition(mboxname, partition, &newpartition);
    if (r) goto done;

    if (!dbonly && !isremote) {
        /* Filesystem Operations */
        r = mailbox_create(mboxname, mbtype, newpartition, acl, uniqueid,
                           options, uidvalidity, highestmodseq, &newmailbox);
        if (r) goto done; /* CREATE failed */

        r = mailbox_add_conversations(newmailbox);
        if (r) goto done;
    }

    /* all is well - activate the mailbox */
    newmbentry = mboxlist_entry_create();
    newmbentry->acl = xstrdupnull(acl);
    newmbentry->mbtype = mbtype;
    newmbentry->partition = xstrdupnull(newpartition);
    if (newmailbox) {
        newmbentry->uniqueid = xstrdupnull(newmailbox->uniqueid);
        newmbentry->uidvalidity = newmailbox->i.uidvalidity;
        newmbentry->foldermodseq = newmailbox->i.highestmodseq;
    }
    r = mboxlist_update_entry(mboxname, newmbentry, NULL);

    if (r) {
        syslog(LOG_ERR, "DBERROR: failed to insert to mailboxes list %s: %s",
               mboxname, cyrusdb_strerror(r));
        r = IMAP_IOERROR;
    }

    /* 9. set MUPDATE entry as commited (CRASH: commited) */
    if (!r && config_mupdate_server && !localonly) {
        mupdate_handle *mupdate_h = NULL;
        char *loc = strconcat(config_servername, "!", newpartition, (char *)NULL);

        r = mupdate_connect(config_mupdate_server, NULL, &mupdate_h, NULL);
        if (!r) r = mupdate_reserve(mupdate_h, mboxname, loc);
        if (!r) r = mupdate_activate(mupdate_h, mboxname, loc, acl);

        if (r) {
            syslog(LOG_ERR, "MUPDATE: can't commit mailbox entry for '%s'",
                   mboxname);
            /* roll back the local record we just stored */
            mboxlist_update_entry(mboxname, NULL, 0);
        }

        if (mupdate_h) mupdate_disconnect(&mupdate_h);
        free(loc);
    }

done:
    if (newmailbox) {
        if (r) mailbox_delete(&newmailbox);
        else if (mboxptr) *mboxptr = newmailbox;   /* transfer the open, locked mailbox */
        else mailbox_close(&newmailbox);
    }

    free(acl);
    free(newpartition);
    mboxlist_entry_free(&newmbentry);

    return r;
}

/* Public create entry point: picks default options, reuses/bumps the
 * UIDVALIDITY of a previously-deleted mailbox of the same name, and
 * optionally fires a MailboxCreate event. */
EXPORTED int mboxlist_createmailbox(const char *name, int mbtype,
                                    const char *partition,
                                    int isadmin, const char *userid,
                                    const struct auth_state *auth_state,
                                    int localonly, int forceuser, int dbonly,
                                    int notify, struct mailbox **mailboxptr)
{
    int options = config_getint(IMAPOPT_MAILBOX_DEFAULT_OPTIONS)
                  | OPT_POP3_NEW_UIDL;
    int r;
    struct mailbox *mailbox = NULL;
    uint32_t uidvalidity = 0;

    /* check if a previous deleted mailbox existed */
    mbentry_t *oldmbentry = NULL;
    r = mboxlist_lookup_allow_all(name, &oldmbentry, NULL);
    if (!r && oldmbentry->mbtype == MBTYPE_DELETED) {
        /* then the UIDVALIDITY must be higher than before */
        if (uidvalidity <= oldmbentry->uidvalidity)
            uidvalidity = oldmbentry->uidvalidity+1;
    }
    mboxlist_entry_free(&oldmbentry);

    r = mboxlist_createmailbox_full(name, mbtype, partition,
                                    isadmin, userid, auth_state,
                                    options, uidvalidity, 0, NULL, NULL,
                                    localonly, forceuser, dbonly, &mailbox);

    if (notify && !r) {
        /* send a MailboxCreate event notification */
        struct mboxevent *mboxevent = mboxevent_new(EVENT_MAILBOX_CREATE);
        mboxevent_extract_mailbox(mboxevent, mailbox);
        mboxevent_set_access(mboxevent, NULL, NULL, userid, mailbox->name, 1);

        mboxevent_notify(&mboxevent);
        mboxevent_free(&mboxevent);
    }

    if (mailboxptr && !r) *mailboxptr = mailbox;
    else mailbox_close(&mailbox);

    return r;
}

/* Create used by replication: caller supplies acl/uniqueid/modseq and
 * implicitly has admin rights. */
EXPORTED int mboxlist_createsync(const char *name, int mbtype,
                                 const char *partition,
                                 const char *userid,
                                 const struct auth_state *auth_state,
                                 int options, unsigned uidvalidity,
                                 modseq_t highestmodseq,
                                 const char *acl, const char *uniqueid,
                                 int local_only, struct mailbox **mboxptr)
{
    return mboxlist_createmailbox_full(name, mbtype, partition,
                                       1, userid, auth_state,
                                       options, uidvalidity, highestmodseq,
                                       acl, uniqueid, local_only, 1, 0,
                                       mboxptr);
}

/* insert an entry for the proxy */
EXPORTED int mboxlist_insertremote(mbentry_t *mbentry,
                                   struct txn **txn)
{
    int r = 0;

    if (mbentry->server) {
        /* remote mailbox */
        if (config_mupdate_config == IMAP_ENUM_MUPDATE_CONFIG_UNIFIED &&
            !strcasecmp(mbentry->server, config_servername)) {
            /* its on our server, make it a local mailbox */
            mbentry->mbtype &= ~MBTYPE_REMOTE;
            /* NOTE(review): the old server string appears to be leaked
             * here (set to NULL without a free) — verify ownership */
            mbentry->server = NULL;
        }
        else {
            /* make sure it's a remote mailbox */
            mbentry->mbtype |= MBTYPE_REMOTE;
        }
    }

    /* database put */
    r = mboxlist_update_entry(mbentry->name, mbentry, txn);
    switch (r) {
    case CYRUSDB_OK:
        break;
    case CYRUSDB_AGAIN:
        abort(); /* shouldn't happen ! */
        break;
    default:
        syslog(LOG_ERR, "DBERROR: error updating database %s: %s",
               mbentry->name, cyrusdb_strerror(r));
        r = IMAP_IOERROR;
        break;
    }

    return r;
}

/* Special function to delete a remote mailbox.
 * Only affects mboxlist.
 * Assumes admin powers.
 */
EXPORTED int mboxlist_deleteremote(const char *name, struct txn **in_tid)
{
    int r;
    struct txn **tid;
    struct txn *lcl_tid = NULL;   /* used when the caller passes no txn */
    mbentry_t *mbentry = NULL;

    if(in_tid) {
        tid = in_tid;
    } else {
        tid = &lcl_tid;
    }

 retry:
    r = mboxlist_mylookup(name, &mbentry, tid, 1);
    switch (r) {
    case 0:
        break;

    case IMAP_MAILBOX_NONEXISTENT:
        r = 0;   /* nothing to delete: treated as success */
        break;

    case IMAP_AGAIN:
        goto retry;
        break;

    default:
        goto done;
    }

    /* sanity: refuse when the record isn't actually a remote mailbox */
    if (mbentry && (mbentry->mbtype & MBTYPE_REMOTE) && !mbentry->server) {
        syslog(LOG_ERR,
               "mboxlist_deleteremote called on non-remote mailbox: %s",
               name);
        goto done;
    }

    r = mboxlist_update_entry(name, NULL, tid);
    if (r) {
        syslog(LOG_ERR, "DBERROR: error deleting %s: %s",
               name, cyrusdb_strerror(r));
        r = IMAP_IOERROR;
    }

    /* commit db operations, but only if we weren't passed a transaction */
    if (!in_tid) {
        r = cyrusdb_commit(mbdb, *tid);
        if (r) {
            syslog(LOG_ERR, "DBERROR: failed on commit: %s",
                   cyrusdb_strerror(r));
            r = IMAP_IOERROR;
        }
        tid = NULL;
    }

 done:
    if (r && !in_tid && tid) {
        /* Abort the transaction if it is still in progress */
        cyrusdb_abort(mbdb, *tid);
    }

    return r;
}

/* foreach callback: accumulate mailbox names into a strarray */
static int addmbox_to_list(const mbentry_t *mbentry, void *rock)
{
    strarray_t *list = (strarray_t *)rock;
    strarray_append(list, mbentry->name);
    return 0;
}

/*
 * Delayed Delete a mailbox: translate delete into rename
 */
EXPORTED int
mboxlist_delayed_deletemailbox(const char *name, int isadmin,
                               const char *userid,
                               const struct auth_state *auth_state,
                               struct mboxevent *mboxevent,
                               int checkacl,
                               int localonly, int force)
{
    mbentry_t *mbentry = NULL;
    strarray_t existing = STRARRAY_INITIALIZER;
    int i;
    char newname[MAX_MAILBOX_BUFFER];
    int r = 0;
    long myrights;

    if (!isadmin && force) return IMAP_PERMISSION_DENIED;

    /* delete of a user.X folder */
    mbname_t *mbname = mbname_from_intname(name);
    if (mbname_userid(mbname) && !strarray_size(mbname_boxes(mbname))) {
        /* Can't DELETE INBOX (your own inbox) */
        if (!strcmpsafe(mbname_userid(mbname), userid)) {
            r = IMAP_MAILBOX_NOTSUPPORTED;
            goto done;
        }
        /* Only admins may delete user */
        if (!isadmin) {
            r = IMAP_PERMISSION_DENIED;
            goto done;
        }
    }

    /* non-admins may not delete a folder carrying a special-use flag */
    if (!isadmin && mbname_userid(mbname)) {
        struct buf attrib = BUF_INITIALIZER;
        annotatemore_lookup(mbname_intname(mbname), "/specialuse",
                            mbname_userid(mbname), &attrib);
        if (attrib.len)
            r = IMAP_MAILBOX_SPECIALUSE;
        buf_free(&attrib);
        if (r) goto done;
    }

    r = mboxlist_lookup(name, &mbentry, NULL);
    if (r) goto done;

    /* check if user has Delete right (we've already excluded non-admins
     * from deleting a user mailbox) */
    if (checkacl) {
        myrights = cyrus_acl_myrights(auth_state, mbentry->acl);
        if (!(myrights & ACL_DELETEMBOX)) {
            /* User has admin rights over their own mailbox namespace */
            if (mboxname_userownsmailbox(userid, name) &&
                (config_implicitrights & ACL_ADMIN)) {
                isadmin = 1;
            }

            /* Lie about error if privacy demands */
            r = (isadmin || (myrights & ACL_LOOKUP)) ?
                IMAP_PERMISSION_DENIED : IMAP_MAILBOX_NONEXISTENT;
            goto done;
        }
    }

    /* check if there are already too many! */
    mboxname_todeleted(name, newname, 0);
    r = mboxlist_mboxtree(newname, addmbox_to_list,
                          &existing, MBOXTREE_SKIP_ROOT);
    if (r) goto done;

    /* keep the last 19, so the new one is the 20th */
    for (i = 0; i < (int)existing.count - 19; i++) {
        const char *subname = strarray_nth(&existing, i);
        syslog(LOG_NOTICE,
               "too many subfolders for %s, deleting %s (%d / %d)",
               newname, subname, i+1, (int)existing.count);
        r = mboxlist_deletemailbox(subname, 1, userid, auth_state,
                                   NULL, 0, 1, 1);
        if (r) goto done;
    }

    /* get the deleted name */
    mboxname_todeleted(name, newname, 1);

    /* Get mboxlist_renamemailbox to do the hard work. No ACL checks needed */
    r = mboxlist_renamemailbox((char *)name, newname, mbentry->partition,
                               0 /* uidvalidity */,
                               1 /* isadmin */, userid,
                               auth_state,
                               mboxevent,
                               localonly /* local_only */,
                               force, 1);

done:
    strarray_fini(&existing);
    mboxlist_entry_free(&mbentry);
    mbname_free(&mbname);

    return r;
}

/*
 * Delete a mailbox.
 * Deleting the mailbox user.FOO may only be performed by an admin.
 *
 * 1. Begin transaction
 * 2. Verify ACL's
 * 3. remove from database
 * 4. remove from disk
 * 5. commit transaction
 * 6. Open mupdate connection if necessary
 * 7. delete from mupdate
 *
 */
EXPORTED int mboxlist_deletemailbox(const char *name, int isadmin,
                                    const char *userid,
                                    const struct auth_state *auth_state,
                                    struct mboxevent *mboxevent,
                                    int checkacl,
                                    int local_only, int force)
{
    mbentry_t *mbentry = NULL;
    int r = 0;
    long myrights;
    struct mailbox *mailbox = NULL;
    int isremote = 0;
    mupdate_handle *mupdate_h = NULL;

    if (!isadmin && force) return IMAP_PERMISSION_DENIED;

    /* delete of a user.X folder */
    mbname_t *mbname = mbname_from_intname(name);
    if (mbname_userid(mbname) && !strarray_size(mbname_boxes(mbname))) {
        /* Can't DELETE INBOX (your own inbox) */
        if (!strcmpsafe(mbname_userid(mbname), userid)) {
            r = IMAP_MAILBOX_NOTSUPPORTED;
            goto done;
        }
        /* Only admins may delete user */
        if (!isadmin) {
            r = IMAP_PERMISSION_DENIED;
            goto done;
        }
    }

    /* non-admins may not delete a folder carrying a special-use flag */
    if (!isadmin && mbname_userid(mbname)) {
        struct buf attrib = BUF_INITIALIZER;
        annotatemore_lookup(mbname_intname(mbname), "/specialuse",
                            mbname_userid(mbname), &attrib);
        if (attrib.len)
            r = IMAP_MAILBOX_SPECIALUSE;
        buf_free(&attrib);
        if (r) goto done;
    }

    r = mboxlist_lookup_allow_all(name, &mbentry, NULL);
    if (r) goto done;

    isremote = mbentry->mbtype & MBTYPE_REMOTE;

    /* check if user has Delete right (we've already excluded non-admins
     * from deleting a user mailbox) */
    if (checkacl) {
        myrights = cyrus_acl_myrights(auth_state, mbentry->acl);
        if(!(myrights & ACL_DELETEMBOX)) {
            /* User has admin rights over their own mailbox namespace */
            if (mboxname_userownsmailbox(userid, name) &&
                (config_implicitrights & ACL_ADMIN)) {
                isadmin = 1;
            }

            /* Lie about error if privacy demands */
            r = (isadmin || (myrights & ACL_LOOKUP)) ?
                IMAP_PERMISSION_DENIED : IMAP_MAILBOX_NONEXISTENT;
            goto done;
        }
    }

    /* Lock the mailbox if it isn't a remote mailbox */
    if (!isremote) {
        r = mailbox_open_iwl(name, &mailbox);
    }
    if (r && !force) goto done;

    /* remove from mupdate */
    if (!isremote && !local_only && config_mupdate_server) {
        /* delete the mailbox in MUPDATE */
        r = mupdate_connect(config_mupdate_server, NULL, &mupdate_h, NULL);
        if (r) {
            syslog(LOG_ERR,
                   "cannot connect to mupdate server for delete of '%s'",
                   name);
            goto done;
        }
        r = mupdate_delete(mupdate_h, name);
        if(r) {
            syslog(LOG_ERR,
                   "MUPDATE: can't delete mailbox entry '%s'", name);
        }
        if (mupdate_h) mupdate_disconnect(&mupdate_h);
    }
    if (r && !force) goto done;

    if (!isremote && !mboxname_isdeletedmailbox(name, NULL)) {
        /* store a DELETED marker */
        mbentry_t *newmbentry = mboxlist_entry_create();
        newmbentry->name = xstrdupnull(name);
        newmbentry->mbtype = MBTYPE_DELETED;
        if (mailbox) {
            newmbentry->uniqueid = xstrdupnull(mailbox->uniqueid);
            newmbentry->uidvalidity = mailbox->i.uidvalidity;
            newmbentry->foldermodseq = mailbox_modseq_dirty(mailbox);
        }
        r = mboxlist_update(newmbentry, /*localonly*/1);
        mboxlist_entry_free(&newmbentry);
    }
    else {
        /* delete entry (including DELETED.* mailboxes, no need
         * to keep that rubbish around) */
        r = mboxlist_update_entry(name, NULL, 0);
        if (r) {
            syslog(LOG_ERR, "DBERROR: error deleting %s: %s",
                   name, cyrusdb_strerror(r));
            r = IMAP_IOERROR;
            if (!force) goto done;
        }
        if (r && !force) goto done;
    }

    /* delete underlying mailbox */
    if (!isremote && mailbox) {
        /* only on a real delete do we delete from the remote end as well */
        sync_log_unmailbox(mailbox->name);
        mboxevent_extract_mailbox(mboxevent, mailbox);
        mboxevent_set_access(mboxevent, NULL, NULL, userid, mailbox->name, 1);

        r = mailbox_delete(&mailbox);
        /* abort event notification */
        if (r && mboxevent)
            mboxevent_free(&mboxevent);
    }

 done:
    mailbox_close(&mailbox);
    mboxlist_entry_free(&mbentry);
    mbname_free(&mbname);

    return r;
}

/* Disallow renaming a special-use folder to anything that is not a
 * single-depth, user-owned, non-deleted mailbox. */
static int _rename_check_specialuse(const char *oldname, const char *newname)
{
    mbname_t *old = mbname_from_intname(oldname);
    mbname_t *new = mbname_from_intname(newname);
    struct buf attrib = BUF_INITIALIZER;
    int r = 0;
    if (mbname_userid(old))
        annotatemore_lookup(oldname, "/specialuse",
                            mbname_userid(old), &attrib);

    /* we have specialuse? */
    if (attrib.len) {
        /* then target must be a single-depth mailbox too */
        if (strarray_size(mbname_boxes(new)) != 1)
            r = IMAP_MAILBOX_SPECIALUSE;
        /* and have a userid as well */
        if (!mbname_userid(new))
            r = IMAP_MAILBOX_SPECIALUSE;
        /* and not be deleted */
        if (mbname_isdeleted(new))
            r = IMAP_MAILBOX_SPECIALUSE;
    }

    mbname_free(&new);
    mbname_free(&old);
    buf_free(&attrib);
    return r;
}

/*
 * Rename/move a single mailbox (recursive renames are handled at a
 * higher level).  This only supports local mailboxes.  Remote
 * mailboxes are handled up in imapd.c
 */
EXPORTED int mboxlist_renamemailbox(const char *oldname,
                                    const char *newname,
                                    const char *partition,
                                    unsigned uidvalidity,
                                    int isadmin,
                                    const char *userid,
                                    const struct auth_state *auth_state,
                                    struct mboxevent *mboxevent,
                                    int local_only, int forceuser,
                                    int ignorequota)
{
    int r;
    int mupdatecommiterror = 0;
    long myrights;
    int isusermbox = 0; /* Are we renaming someone's inbox */
    int partitionmove = 0;
    struct mailbox *oldmailbox = NULL;
    struct mailbox *newmailbox = NULL;
    struct txn *tid = NULL;
    const char *root = NULL;
    char *newpartition = NULL;
    mupdate_handle *mupdate_h = NULL;
    mbentry_t *newmbentry = NULL;

    /* 1. open mailbox */
    r = mailbox_open_iwl(oldname, &oldmailbox);
    if (r) return r;

    myrights = cyrus_acl_myrights(auth_state, oldmailbox->acl);

    /* check the ACLs up-front */
    if (!isadmin) {
        if (!(myrights & ACL_DELETEMBOX)) {
            /* lie about the error if the user can't even see the mailbox */
            r = (myrights & ACL_LOOKUP) ?
                IMAP_PERMISSION_DENIED : IMAP_MAILBOX_NONEXISTENT;
            goto done;
        }
    }

    /* 2.
verify valid move */
    /* XXX - handle remote mailbox */

    /* special case: same mailbox, must be a partition move */
    if (!strcmp(oldname, newname)) {
        const char *oldpath = mailbox_datapath(oldmailbox, 0);

        /* Only admin can move mailboxes between partitions */
        if (!isadmin) {
            r = IMAP_PERMISSION_DENIED;
            goto done;
        }

        /* No partition, we're definitely not moving anywhere */
        if (!partition) {
            r = IMAP_MAILBOX_EXISTS;
            goto done;
        }

        /* let mupdate code below know it was a partition move */
        partitionmove = 1;

        /* this is OK because it uses a different static buffer */
        root = config_partitiondir(partition);
        if (!root) {
            r = IMAP_PARTITION_UNKNOWN;
            goto done;
        }

        if (!strncmp(root, oldpath, strlen(root)) &&
            oldpath[strlen(root)] == '/') {
            /* partitions are the same or share common prefix */
            r = IMAP_MAILBOX_EXISTS;
            goto done;
        }

        /* NOTE: this is a rename to the same mailbox name on a
         * different partition.  This is a pretty filthy hack,
         * which should be handled by having four totally different
         * codepaths: INBOX -> INBOX.foo, user rename, regular rename
         * and of course this one, partition move */
        newpartition = xstrdup(partition);
        r = mailbox_copy_files(oldmailbox, newpartition, newname,
                               oldmailbox->uniqueid);
        if (r) goto done;
        newmbentry = mboxlist_entry_create();
        newmbentry->mbtype = oldmailbox->mbtype;
        newmbentry->partition = xstrdupnull(newpartition);
        newmbentry->acl = xstrdupnull(oldmailbox->acl);
        newmbentry->uidvalidity = oldmailbox->i.uidvalidity;
        newmbentry->uniqueid = xstrdupnull(oldmailbox->uniqueid);
        newmbentry->foldermodseq = oldmailbox->i.highestmodseq;
        /* bump regardless, it's rare */

        r = mboxlist_update_entry(newname, newmbentry, &tid);
        if (r) goto done;

        /* skip ahead to the commit */
        goto dbdone;
    }

    if (!isadmin) {
        r = _rename_check_specialuse(oldname, newname);
        if (r) goto done;
    }

    /* RENAME of some user's INBOX */
    if (mboxname_isusermailbox(oldname, 1)) {
        if (mboxname_isdeletedmailbox(newname, NULL)) {
            /* delete user is OK */
        }
        else if (mboxname_isusermailbox(newname, 1)) {
            /* user rename is depends on config */
            if (!config_getswitch(IMAPOPT_ALLOWUSERMOVES)) {
                r = IMAP_MAILBOX_NOTSUPPORTED;
                goto done;
            }
        }
        else if (mboxname_userownsmailbox(userid, oldname) &&
                 mboxname_userownsmailbox(userid, newname)) {
            /* Special case of renaming inbox */
            isusermbox = 1;
        }
        else {
            /* Everything else is bogus */
            r = IMAP_MAILBOX_NOTSUPPORTED;
            goto done;
        }
    }

    r = mboxlist_create_namecheck(newname, userid, auth_state,
                                  isadmin, forceuser);
    if (r) goto done;

    r = mboxlist_create_partition(newname, partition, &newpartition);
    if (r) goto done;
    if (!newpartition) newpartition = xstrdup(config_defpartition);

    /* keep uidvalidity on rename unless specified */
    if (!uidvalidity)
        uidvalidity = oldmailbox->i.uidvalidity;

    /* Rename the actual mailbox */
    r = mailbox_rename_copy(oldmailbox, newname, newpartition, uidvalidity,
                            isusermbox ? userid : NULL, ignorequota,
                            &newmailbox);

    if (r) goto done;

    syslog(LOG_INFO, "Rename: %s -> %s", oldname, newname);

    /* create new entry */
    newmbentry = mboxlist_entry_create();
    newmbentry->name = xstrdupnull(newmailbox->name);
    newmbentry->mbtype = newmailbox->mbtype;
    newmbentry->partition = xstrdupnull(newmailbox->part);
    newmbentry->acl = xstrdupnull(newmailbox->acl);
    newmbentry->uidvalidity = newmailbox->i.uidvalidity;
    newmbentry->uniqueid = xstrdupnull(newmailbox->uniqueid);
    newmbentry->foldermodseq = newmailbox->i.highestmodseq;

    /* retry the two-record update until the DB stops saying AGAIN */
    do {
        r = 0;

        /* delete the old entry */
        if (!isusermbox) {
            /* store a DELETED marker */
            mbentry_t *oldmbentry = mboxlist_entry_create();
            oldmbentry->name = xstrdupnull(oldmailbox->name);
            oldmbentry->mbtype = MBTYPE_DELETED;
            oldmbentry->uidvalidity = oldmailbox->i.uidvalidity;
            oldmbentry->uniqueid = xstrdupnull(oldmailbox->uniqueid);
            oldmbentry->foldermodseq = mailbox_modseq_dirty(oldmailbox);

            r = mboxlist_update_entry(oldname, oldmbentry, &tid);

            mboxlist_entry_free(&oldmbentry);
        }

        /* create a new entry */
        if (!r) {
            r = mboxlist_update_entry(newname, newmbentry, &tid);
        }

        switch (r) {
        case 0: /* success */
            break;
        case CYRUSDB_AGAIN:
            tid = NULL;   /* transaction was invalidated; restart */
            break;
        default:
            syslog(LOG_ERR, "DBERROR: rename failed on store %s %s: %s",
                   oldname, newname, cyrusdb_strerror(r));
            r = IMAP_IOERROR;
            goto done;
            break;
        }
    } while (r == CYRUSDB_AGAIN);

 dbdone:
    /* 3. Commit transaction */
    r = cyrusdb_commit(mbdb, tid);
    tid = NULL;
    if (r) {
        syslog(LOG_ERR, "DBERROR: rename failed on commit %s %s: %s",
               oldname, newname, cyrusdb_strerror(r));
        r = IMAP_IOERROR;
        goto done;
    }

    if (!local_only && config_mupdate_server) {
        /* commit the mailbox in MUPDATE */
        char *loc = strconcat(config_servername, "!", newpartition, (char *)NULL);

        r = mupdate_connect(config_mupdate_server, NULL, &mupdate_h, NULL);
        if (!partitionmove) {
            if (!r && !isusermbox)
                r = mupdate_delete(mupdate_h, oldname);
            if (!r) r = mupdate_reserve(mupdate_h, newname, loc);
        }
        if (!r) r = mupdate_activate(mupdate_h, newname, loc, newmbentry->acl);
        if (r) {
            syslog(LOG_ERR,
                   "MUPDATE: can't commit mailbox entry for '%s'",
                   newname);
            mupdatecommiterror = r;
        }
        if (mupdate_h) mupdate_disconnect(&mupdate_h);
        free(loc);
    }

 done: /* Commit or cleanup */
    if (!r && newmailbox)
        r = mailbox_commit(newmailbox);

    if (r) {
        /* rollback DB changes if it was an mupdate failure */
        if (mupdatecommiterror) {
            r = 0;
            /* recreate an old entry */
            if (!isusermbox)
                r = mboxlist_update_entry(oldname, newmbentry, &tid);

            /* delete the new entry */
            if (!r)
                r = mboxlist_update_entry(newname, NULL, &tid);

            /* Commit transaction */
            if (!r)
                r = cyrusdb_commit(mbdb, tid);

            tid = NULL;
            if (r) {
                /* XXX HOWTO repair this mess! */
                syslog(LOG_ERR,
                       "DBERROR: failed DB rollback on mailboxrename %s %s: %s",
                       oldname, newname, cyrusdb_strerror(r));
                syslog(LOG_ERR,
                       "DBERROR: mailboxdb on mupdate and backend ARE NOT CONSISTENT");
                syslog(LOG_ERR,
                       "DBERROR: mailboxdb on mupdate has entry for %s, mailboxdb on backend has entry for %s and files are on the old position",
                       oldname, newname);
                r = IMAP_IOERROR;
            } else {
                r = mupdatecommiterror;
            }
        }

        if (newmailbox) mailbox_delete(&newmailbox);
        if (partitionmove && newpartition)
            mailbox_delete_cleanup(NULL, newpartition, newname, oldmailbox->uniqueid);
        mailbox_close(&oldmailbox);
    } else {
        if (newmailbox) {
            /* prepare the event notification */
            if (mboxevent) {

                /* case of delayed delete */
                if (mboxevent->type == EVENT_MAILBOX_DELETE)
                    mboxevent_extract_mailbox(mboxevent, oldmailbox);
                else {
                    mboxevent_extract_mailbox(mboxevent, newmailbox);
                    mboxevent_extract_old_mailbox(mboxevent, oldmailbox);
                }

                mboxevent_set_access(mboxevent, NULL, NULL, userid, newmailbox->name, 1);
            }

            /* log the rename before we close either mailbox, so that
             * we never nuke the mailbox from the replica before realising
             * that it has been renamed.  This can be moved later again when
             * we sync mailboxes by uniqueid rather than name... */
            sync_log_mailbox_double(oldname, newname);

            mailbox_rename_cleanup(&oldmailbox, isusermbox);

#ifdef WITH_DAV
            mailbox_add_dav(newmailbox);
#endif

            mailbox_close(&newmailbox);

            /* and log an append so that squatter indexes it */
            sync_log_append(newname);
        }
        else if (partitionmove) {
            char *oldpartition = xstrdup(oldmailbox->part);
            char *olduniqueid = xstrdup(oldmailbox->uniqueid);
            if (config_auditlog)
                syslog(LOG_NOTICE, "auditlog: partitionmove sessionid=<%s> "
                       "mailbox=<%s> uniqueid=<%s> oldpart=<%s> newpart=<%s>",
                       session_id(),
                       oldmailbox->name, oldmailbox->uniqueid,
                       oldpartition, partition);
            /* this will sync-log the name anyway */
            mailbox_close(&oldmailbox);
            mailbox_delete_cleanup(NULL, oldpartition, oldname, olduniqueid);
            free(olduniqueid);
            free(oldpartition);
        }
        else
            abort(); /* impossible, in theory */
    }

    /* free memory */
    free(newpartition);
    mboxlist_entry_free(&newmbentry);

    return r;
}

/*
 * Check if the admin rights are present in the 'rights'
 */
static int mboxlist_have_admin_rights(const char *rights) {
    int access, have_admin_access;

    cyrus_acl_strtomask(rights, &access);
    have_admin_access = access & ACL_ADMIN;

    return have_admin_access;
}

/*
 * Change the ACL for mailbox 'name' so that 'identifier' has the
 * rights enumerated in the string 'rights'.  If 'rights' is the null
 * pointer, removes the ACL entry for 'identifier'.   'isadmin' is
 * nonzero if user is a mailbox admin.  'userid' is the user's login id.
 *
 * 1. Start transaction
 * 2. Check rights
 * 3. Set db entry
 * 4. Change backup copy (cyrus.header)
 * 5. Commit transaction
 * 6. Change mupdate entry
 *
 */
EXPORTED int mboxlist_setacl(const struct namespace *namespace __attribute__((unused)),
                             const char *name,
                             const char *identifier, const char *rights,
                             int isadmin, const char *userid,
                             const struct auth_state *auth_state)
{
    mbentry_t *mbentry = NULL;
    int r;
    int myrights;
    int mode = ACL_MODE_SET;
    int isusermbox = 0;
    int isidentifiermbox = 0;
    int anyoneuseracl = 1;
    int ensure_owner_rights = 0;
    int mask;
    const char *mailbox_owner = NULL;
    struct mailbox *mailbox = NULL;
    char *newacl = NULL;
    struct txn *tid = NULL;

    /* round trip identifier to potentially strip domain */
    mbname_t *mbname = mbname_from_userid(identifier);
    /* XXX - enforce cross domain restrictions */
    identifier = mbname_userid(mbname);

    /* checks if the mailbox belongs to the user who is trying to change the
       access rights */
    if (mboxname_userownsmailbox(userid, name))
        isusermbox = 1;
    anyoneuseracl = config_getswitch(IMAPOPT_ANYONEUSERACL);

    /* checks if the identifier is the mailbox owner */
    if (mboxname_userownsmailbox(identifier, name))
        isidentifiermbox = 1;

    /* who is the mailbox owner? */
    if (isusermbox) {
        mailbox_owner = userid;
    }
    else if (isidentifiermbox) {
        mailbox_owner = identifier;
    }

    /* ensure the access rights if the folder owner is the current user or
       the identifier */
    ensure_owner_rights = isusermbox || isidentifiermbox;

    /* 1.
Start Transaction */ /* lookup the mailbox to make sure it exists and get its acl */ do { r = mboxlist_mylookup(name, &mbentry, &tid, 1); } while(r == IMAP_AGAIN); /* Can't do this to an in-transit or reserved mailbox */ if (!r && mbentry->mbtype & (MBTYPE_MOVING | MBTYPE_RESERVE | MBTYPE_DELETED)) { r = IMAP_MAILBOX_NOTSUPPORTED; } /* if it is not a remote mailbox, we need to unlock the mailbox list, * lock the mailbox, and re-lock the mailboxes list */ /* we must do this to obey our locking rules */ if (!r && !(mbentry->mbtype & MBTYPE_REMOTE)) { cyrusdb_abort(mbdb, tid); tid = NULL; mboxlist_entry_free(&mbentry); /* open & lock mailbox header */ r = mailbox_open_iwl(name, &mailbox); if (!r) { do { /* lookup the mailbox to make sure it exists and get its acl */ r = mboxlist_mylookup(name, &mbentry, &tid, 1); } while (r == IMAP_AGAIN); } if(r) goto done; } /* 2. Check Rights */ if (!r && !isadmin) { myrights = cyrus_acl_myrights(auth_state, mbentry->acl); if (!(myrights & ACL_ADMIN)) { r = (myrights & ACL_LOOKUP) ? IMAP_PERMISSION_DENIED : IMAP_MAILBOX_NONEXISTENT; goto done; } } /* 2.1 Only admin user can set 'anyone' rights if config says so */ if (!r && !isadmin && !anyoneuseracl && !strncmp(identifier, "anyone", 6)) { r = IMAP_PERMISSION_DENIED; goto done; } /* 3. 
Set DB Entry */ if(!r) { /* Make change to ACL */ newacl = xstrdup(mbentry->acl); if (rights && *rights) { /* rights are present and non-empty */ mode = ACL_MODE_SET; if (*rights == '+') { rights++; mode = ACL_MODE_ADD; } else if (*rights == '-') { rights++; mode = ACL_MODE_REMOVE; } /* do not allow non-admin user to remove the admin rights from mailbox owner */ if (!isadmin && isidentifiermbox && mode != ACL_MODE_ADD) { int has_admin_rights = mboxlist_have_admin_rights(rights); if ((has_admin_rights && mode == ACL_MODE_REMOVE) || (!has_admin_rights && mode != ACL_MODE_REMOVE)) { syslog(LOG_ERR, "Denied removal of admin rights on " "folder \"%s\" (owner: %s) by user \"%s\"", name, mailbox_owner, userid); r = IMAP_PERMISSION_DENIED; goto done; } } r = cyrus_acl_strtomask(rights, &mask); if (!r && cyrus_acl_set(&newacl, identifier, mode, mask, ensure_owner_rights ? mboxlist_ensureOwnerRights : 0, (void *)mailbox_owner)) { r = IMAP_INVALID_IDENTIFIER; } } else { /* do not allow to remove the admin rights from mailbox owner */ if (!isadmin && isidentifiermbox) { syslog(LOG_ERR, "Denied removal of admin rights on " "folder \"%s\" (owner: %s) by user \"%s\"", name, mailbox_owner, userid); r = IMAP_PERMISSION_DENIED; goto done; } if (cyrus_acl_remove(&newacl, identifier, ensure_owner_rights ? mboxlist_ensureOwnerRights : 0, (void *)mailbox_owner)) { r = IMAP_INVALID_IDENTIFIER; } } } if (!r) { /* ok, change the database */ free(mbentry->acl); mbentry->acl = xstrdupnull(newacl); r = mboxlist_update_entry(name, mbentry, &tid); if (r) { syslog(LOG_ERR, "DBERROR: error updating acl %s: %s", name, cyrusdb_strerror(r)); r = IMAP_IOERROR; } /* send a AclChange event notification */ struct mboxevent *mboxevent = mboxevent_new(EVENT_ACL_CHANGE); mboxevent_extract_mailbox(mboxevent, mailbox); mboxevent_set_acl(mboxevent, identifier, rights); mboxevent_set_access(mboxevent, NULL, NULL, userid, mailbox->name, 0); mboxevent_notify(&mboxevent); mboxevent_free(&mboxevent); } /* 4. 
Change backup copy (cyrus.header) */ /* we already have it locked from above */ if (!r && !(mbentry->mbtype & MBTYPE_REMOTE)) { mailbox_set_acl(mailbox, newacl, 1); /* want to commit immediately to ensure ordering */ r = mailbox_commit(mailbox); } /* 5. Commit transaction */ if (!r) { if((r = cyrusdb_commit(mbdb, tid)) != 0) { syslog(LOG_ERR, "DBERROR: failed on commit: %s", cyrusdb_strerror(r)); r = IMAP_IOERROR; } tid = NULL; } /* 6. Change mupdate entry */ if (!r && config_mupdate_server) { mupdate_handle *mupdate_h = NULL; /* commit the update to MUPDATE */ char buf[MAX_PARTITION_LEN + HOSTNAME_SIZE + 2]; snprintf(buf, sizeof(buf), "%s!%s", config_servername, mbentry->partition); r = mupdate_connect(config_mupdate_server, NULL, &mupdate_h, NULL); if(r) { syslog(LOG_ERR, "cannot connect to mupdate server for setacl on '%s'", name); } else { r = mupdate_activate(mupdate_h, name, buf, newacl); if(r) { syslog(LOG_ERR, "MUPDATE: can't update mailbox entry for '%s'", name); } } mupdate_disconnect(&mupdate_h); } done: if (r && tid) { /* if we are mid-transaction, abort it! */ int r2 = cyrusdb_abort(mbdb, tid); if (r2) { syslog(LOG_ERR, "DBERROR: error aborting txn in mboxlist_setacl: %s", cyrusdb_strerror(r2)); } } mailbox_close(&mailbox); free(newacl); mboxlist_entry_free(&mbentry); mbname_free(&mbname); return r; } /* * Change the ACL for mailbox 'name'. We already have it locked * and have written the backup copy to the header, so there's * nothing left but to write the mailboxes.db. * * 1. Start transaction * 2. Set db entry * 3. Commit transaction * 4. Change mupdate entry * */ EXPORTED int mboxlist_sync_setacls(const char *name, const char *newacl) { mbentry_t *mbentry = NULL; int r; struct txn *tid = NULL; /* 1. 
Start Transaction */ /* lookup the mailbox to make sure it exists and get its acl */ do { r = mboxlist_mylookup(name, &mbentry, &tid, 1); } while(r == IMAP_AGAIN); /* Can't do this to an in-transit or reserved mailbox */ if (!r && mbentry->mbtype & (MBTYPE_MOVING | MBTYPE_RESERVE | MBTYPE_DELETED)) { r = IMAP_MAILBOX_NOTSUPPORTED; } /* 2. Set DB Entry */ if (!r) { /* ok, change the database */ free(mbentry->acl); mbentry->acl = xstrdupnull(newacl); r = mboxlist_update_entry(name, mbentry, &tid); if (r) { syslog(LOG_ERR, "DBERROR: error updating acl %s: %s", name, cyrusdb_strerror(r)); r = IMAP_IOERROR; } } /* 3. Commit transaction */ if (!r) { r = cyrusdb_commit(mbdb, tid); if (r) { syslog(LOG_ERR, "DBERROR: failed on commit %s: %s", name, cyrusdb_strerror(r)); r = IMAP_IOERROR; } tid = NULL; } /* 4. Change mupdate entry */ if (!r && config_mupdate_server) { mupdate_handle *mupdate_h = NULL; /* commit the update to MUPDATE */ char buf[MAX_PARTITION_LEN + HOSTNAME_SIZE + 2]; sprintf(buf, "%s!%s", config_servername, mbentry->partition); r = mupdate_connect(config_mupdate_server, NULL, &mupdate_h, NULL); if (r) { syslog(LOG_ERR, "cannot connect to mupdate server for syncacl on '%s'", name); } else { r = mupdate_activate(mupdate_h, name, buf, newacl); if(r) { syslog(LOG_ERR, "MUPDATE: can't update mailbox entry for '%s'", name); } } mupdate_disconnect(&mupdate_h); } if (r && tid) { /* if we are mid-transaction, abort it! 
*/ int r2 = cyrusdb_abort(mbdb, tid); if (r2) { syslog(LOG_ERR, "DBERROR: error aborting txn in sync_setacls %s: %s", name, cyrusdb_strerror(r2)); } } mboxlist_entry_free(&mbentry); return r; } struct find_rock { ptrarray_t globs; struct namespace *namespace; const char *userid; const char *domain; int mb_category; int checkmboxlist; int issubs; int singlepercent; struct db *db; int isadmin; const struct auth_state *auth_state; mbname_t *mbname; mbentry_t *mbentry; int matchlen; findall_cb *proc; void *procrock; }; /* return non-zero if we like this one */ static int find_p(void *rockp, const char *key, size_t keylen, const char *data, size_t datalen) { struct find_rock *rock = (struct find_rock *) rockp; char intname[MAX_MAILBOX_PATH+1]; int i; /* skip any $RACL or future $ space keys */ if (key[0] == '$') return 0; memcpy(intname, key, keylen); intname[keylen] = 0; assert(!rock->mbname); rock->mbname = mbname_from_intname(intname); if (!rock->isadmin && !config_getswitch(IMAPOPT_CROSSDOMAINS)) { /* don't list mailboxes outside of the default domain */ if (strcmpsafe(rock->domain, mbname_domain(rock->mbname))) goto nomatch; } if (rock->mb_category && mbname_category(rock->mbname, rock->namespace, rock->userid) != rock->mb_category) goto nomatch; /* NOTE: this will all be cleaned up to be much more efficient sooner or later, with * a mbname_t being kept inside the mbentry, and the extname cached all the way to * final use. For now, we pay the cost of re-calculating for simplicity of the * changes to mbname_t itself */ const char *extname = mbname_extname(rock->mbname, rock->namespace, rock->userid); if (!extname) goto nomatch; int matchlen = 0; for (i = 0; i < rock->globs.count; i++) { glob *g = ptrarray_nth(&rock->globs, i); int thismatch = glob_test(g, extname); if (thismatch > matchlen) matchlen = thismatch; } /* If its not a match, skip it -- partial matches are ok. 
*/ if (!matchlen) goto nomatch; rock->matchlen = matchlen; /* subs DB has empty keys */ if (rock->issubs) goto good; /* ignore entirely deleted records */ if (mboxlist_parse_entry(&rock->mbentry, key, keylen, data, datalen)) goto nomatch; /* nobody sees tombstones */ if (rock->mbentry->mbtype & MBTYPE_DELETED) goto nomatch; /* check acl */ if (!rock->isadmin) { /* always suppress deleted for non-admin */ if (mbname_isdeleted(rock->mbname)) goto nomatch; /* check the acls */ if (!(cyrus_acl_myrights(rock->auth_state, rock->mbentry->acl) & ACL_LOOKUP)) goto nomatch; } good: return 1; nomatch: mboxlist_entry_free(&rock->mbentry); mbname_free(&rock->mbname); return 0; } static int find_cb(void *rockp, /* XXX - confirm these are the same? - nah */ const char *key __attribute__((unused)), size_t keylen __attribute__((unused)), const char *data __attribute__((unused)), size_t datalen __attribute__((unused))) { struct find_rock *rock = (struct find_rock *) rockp; char *testname = NULL; int r = 0; int i; if (rock->checkmboxlist && !rock->mbentry) { r = mboxlist_lookup(mbname_intname(rock->mbname), &rock->mbentry, NULL); if (r) { if (r == IMAP_MAILBOX_NONEXISTENT) r = 0; goto done; } } const char *extname = mbname_extname(rock->mbname, rock->namespace, rock->userid); testname = xstrndup(extname, rock->matchlen); struct findall_data fdata = { testname, rock->mb_category, rock->mbentry, NULL }; if (rock->singlepercent) { char sep = rock->namespace->hier_sep; char *p = testname; /* we need to try all the previous names in order */ while ((p = strchr(p, sep)) != NULL) { *p = '\0'; /* only if this expression could fully match */ int matchlen = 0; for (i = 0; i < rock->globs.count; i++) { glob *g = ptrarray_nth(&rock->globs, i); int thismatch = glob_test(g, testname); if (thismatch > matchlen) matchlen = thismatch; } if (matchlen == (int)strlen(testname)) { r = (*rock->proc)(&fdata, rock->procrock); if (r) goto done; } /* replace the separator for the next longest name */ *p++ = 
sep; } } /* mbname confirms that it's an exact match */ if (rock->matchlen == (int)strlen(extname)) fdata.mbname = rock->mbname; r = (*rock->proc)(&fdata, rock->procrock); done: free(testname); mboxlist_entry_free(&rock->mbentry); mbname_free(&rock->mbname); return r; } struct allmb_rock { struct mboxlist_entry *mbentry; int flags; mboxlist_cb *proc; void *rock; }; static int allmbox_cb(void *rock, const char *key, size_t keylen, const char *data, size_t datalen) { struct allmb_rock *mbrock = (struct allmb_rock *)rock; if (!mbrock->mbentry) { int r = mboxlist_parse_entry(&mbrock->mbentry, key, keylen, data, datalen); if (r) return r; } return mbrock->proc(mbrock->mbentry, mbrock->rock); } static int allmbox_p(void *rock, const char *key, size_t keylen, const char *data, size_t datalen) { struct allmb_rock *mbrock = (struct allmb_rock *)rock; int r; /* skip any dollar keys */ if (keylen && key[0] == '$') return 0; /* free previous record */ mboxlist_entry_free(&mbrock->mbentry); r = mboxlist_parse_entry(&mbrock->mbentry, key, keylen, data, datalen); if (r) return 0; if (!(mbrock->flags & MBOXTREE_TOMBSTONES) && (mbrock->mbentry->mbtype & MBTYPE_DELETED)) return 0; return 1; /* process this record */ } EXPORTED int mboxlist_allmbox(const char *prefix, mboxlist_cb *proc, void *rock, int incdel) { struct allmb_rock mbrock = { NULL, 0, proc, rock }; int r = 0; if (incdel) mbrock.flags |= MBOXTREE_TOMBSTONES; if (!prefix) prefix = ""; r = cyrusdb_foreach(mbdb, prefix, strlen(prefix), allmbox_p, allmbox_cb, &mbrock, 0); mboxlist_entry_free(&mbrock.mbentry); return r; } EXPORTED int mboxlist_mboxtree(const char *mboxname, mboxlist_cb *proc, void *rock, int flags) { struct allmb_rock mbrock = { NULL, flags, proc, rock }; int r = 0; if (!(flags & MBOXTREE_SKIP_ROOT)) { r = cyrusdb_forone(mbdb, mboxname, strlen(mboxname), allmbox_p, allmbox_cb, &mbrock, 0); if (r) goto done; } if (!(flags & MBOXTREE_SKIP_CHILDREN)) { char *prefix = strconcat(mboxname, ".", (char *)NULL); r = 
cyrusdb_foreach(mbdb, prefix, strlen(prefix), allmbox_p, allmbox_cb, &mbrock, 0); free(prefix); if (r) goto done; } if ((flags & MBOXTREE_DELETED)) { struct buf buf = BUF_INITIALIZER; const char *p = strchr(mboxname, '!'); const char *dp = config_getstring(IMAPOPT_DELETEDPREFIX); if (p) { buf_printf(&buf, "%.*s!%s.%s", (int)(p-mboxname), mboxname, dp, p+1); } else { buf_printf(&buf, "%s.%s", dp, mboxname); } const char *prefix = buf_cstring(&buf); r = cyrusdb_foreach(mbdb, prefix, strlen(prefix), allmbox_p, allmbox_cb, &mbrock, 0); buf_free(&buf); if (r) goto done; } done: mboxlist_entry_free(&mbrock.mbentry); return r; } static int racls_del_cb(void *rock, const char *key, size_t keylen, const char *data __attribute__((unused)), size_t datalen __attribute__((unused))) { struct txn **txn = (struct txn **)rock; return cyrusdb_delete(mbdb, key, keylen, txn, /*force*/0); } static int racls_add_cb(const mbentry_t *mbentry, void *rock) { struct txn **txn = (struct txn **)rock; return mboxlist_update_racl(mbentry->name, NULL, mbentry, txn); } EXPORTED int mboxlist_set_racls(int enabled) { struct txn *tid = NULL; int r = 0; int now = !cyrusdb_fetch(mbdb, "$RACL", 5, NULL, NULL, &tid); if (now && !enabled) { syslog(LOG_NOTICE, "removing reverse acl support"); /* remove */ r = cyrusdb_foreach(mbdb, "$RACL", 5, NULL, racls_del_cb, &tid, &tid); } else if (enabled && !now) { /* add */ struct allmb_rock mbrock = { NULL, 0, racls_add_cb, &tid }; /* we can't use mboxlist_allmbox because it doesn't do transactions */ syslog(LOG_NOTICE, "adding reverse acl support"); r = cyrusdb_foreach(mbdb, "", 0, allmbox_p, allmbox_cb, &mbrock, &tid); if (r) { syslog(LOG_ERR, "ERROR: failed to add reverse acl support %s", error_message(r)); } mboxlist_entry_free(&mbrock.mbentry); if (!r) r = cyrusdb_store(mbdb, "$RACL", 5, "", 0, &tid); } if (r) cyrusdb_abort(mbdb, tid); else cyrusdb_commit(mbdb, tid); return r; } struct alluser_rock { char *prev; user_cb *proc; void *rock; }; static int 
alluser_cb(const mbentry_t *mbentry, void *rock) { struct alluser_rock *urock = (struct alluser_rock *)rock; char *userid = mboxname_to_userid(mbentry->name); int r = 0; if (userid) { if (strcmpsafe(urock->prev, userid)) { r = urock->proc(userid, urock->rock); free(urock->prev); urock->prev = xstrdup(userid); } free(userid); } return r; } EXPORTED int mboxlist_alluser(user_cb *proc, void *rock) { struct alluser_rock urock; int r = 0; urock.prev = NULL; urock.proc = proc; urock.rock = rock; r = mboxlist_allmbox(NULL, alluser_cb, &urock, /*flags*/0); free(urock.prev); return r; } struct raclrock { int prefixlen; strarray_t *list; }; static int racl_cb(void *rock, const char *key, size_t keylen, const char *data __attribute__((unused)), size_t datalen __attribute__((unused))) { struct raclrock *raclrock = (struct raclrock *)rock; strarray_appendm(raclrock->list, xstrndup(key + raclrock->prefixlen, keylen - raclrock->prefixlen)); return 0; } EXPORTED int mboxlist_usermboxtree(const char *userid, mboxlist_cb *proc, void *rock, int flags) { char *inbox = mboxname_user_mbox(userid, 0); int r = mboxlist_mboxtree(inbox, proc, rock, flags); if (flags & MBOXTREE_PLUS_RACL) { struct allmb_rock mbrock = { NULL, flags, proc, rock }; /* we're using reverse ACLs */ struct buf buf = BUF_INITIALIZER; strarray_t matches = STRARRAY_INITIALIZER; /* user items */ mboxlist_racl_key(1, userid, NULL, &buf); /* this is the prefix */ struct raclrock raclrock = { buf.len, &matches }; /* we only need to look inside the prefix still, but we keep the length * in raclrock pointing to the start of the mboxname part of the key so * we get correct names in matches */ r = cyrusdb_foreach(mbdb, buf.s, buf.len, NULL, racl_cb, &raclrock, NULL); buf_reset(&buf); /* shared items */ mboxlist_racl_key(0, userid, NULL, &buf); raclrock.prefixlen = buf.len; if (!r) r = cyrusdb_foreach(mbdb, buf.s, buf.len, NULL, racl_cb, &raclrock, NULL); /* XXX - later we need to sort the array when we've added groups */ int 
i; for (i = 0; !r && i < strarray_size(&matches); i++) { const char *mboxname = strarray_nth(&matches, i); r = cyrusdb_forone(mbdb, mboxname, strlen(mboxname), allmbox_p, allmbox_cb, &mbrock, 0); } buf_free(&buf); strarray_fini(&matches); mboxlist_entry_free(&mbrock.mbentry); } free(inbox); return r; } static int mboxlist_find_category(struct find_rock *rock, const char *prefix, size_t len) { int r = 0; if (!rock->issubs && !rock->isadmin && !cyrusdb_fetch(rock->db, "$RACL", 5, NULL, NULL, NULL)) { /* we're using reverse ACLs */ struct buf buf = BUF_INITIALIZER; strarray_t matches = STRARRAY_INITIALIZER; mboxlist_racl_key(rock->mb_category == MBNAME_OTHERUSER, rock->userid, NULL, &buf); /* this is the prefix */ struct raclrock raclrock = { buf.len, &matches }; /* we only need to look inside the prefix still, but we keep the length * in raclrock pointing to the start of the mboxname part of the key so * we get correct names in matches */ if (len) buf_appendmap(&buf, prefix, len); r = cyrusdb_foreach(rock->db, buf.s, buf.len, NULL, racl_cb, &raclrock, NULL); /* XXX - later we need to sort the array when we've added groups */ int i; for (i = 0; !r && i < strarray_size(&matches); i++) { const char *key = strarray_nth(&matches, i); r = cyrusdb_forone(rock->db, key, strlen(key), &find_p, &find_cb, rock, NULL); } strarray_fini(&matches); } else { r = cyrusdb_foreach(rock->db, prefix, len, &find_p, &find_cb, rock, NULL); } if (r == CYRUSDB_DONE) r = 0; return r; } /* * Find all mailboxes that match 'pattern'. * 'isadmin' is nonzero if user is a mailbox admin. 'userid' * is the user's login id. For each matching mailbox, calls * 'proc' with the name of the mailbox. If 'proc' ever returns * a nonzero value, mboxlist_findall immediately stops searching * and returns that value. 'rock' is passed along as an argument to proc in * case it wants some persistant storage or extra data. */ /* Find all mailboxes that match 'pattern'. 
*/ static int mboxlist_do_find(struct find_rock *rock, const strarray_t *patterns) { const char *userid = rock->userid; int isadmin = rock->isadmin; int crossdomains = config_getswitch(IMAPOPT_CROSSDOMAINS); char inbox[MAX_MAILBOX_BUFFER]; size_t inboxlen = 0; size_t prefixlen, len; size_t domainlen = 0; size_t userlen = userid ? strlen(userid) : 0; char domainpat[MAX_MAILBOX_BUFFER]; /* do intra-domain fetches only */ char commonpat[MAX_MAILBOX_BUFFER]; int r = 0; int i; const char *p; if (patterns->count < 1) return 0; /* nothing to do */ for (i = 0; i < patterns->count; i++) { glob *g = glob_init(strarray_nth(patterns, i), rock->namespace->hier_sep); ptrarray_append(&rock->globs, g); } if (config_virtdomains && userid && (p = strchr(userid, '@'))) { userlen = p - userid; domainlen = strlen(p); /* includes separator */ snprintf(domainpat, sizeof(domainpat), "%s!", p+1); } else domainpat[0] = '\0'; /* calculate the inbox (with trailing .INBOX. for later use) */ if (userid && (!(p = strchr(userid, rock->namespace->hier_sep)) || ((p - userid) > (int)userlen)) && strlen(userid)+7 < MAX_MAILBOX_BUFFER) { char *t, *tmpuser = NULL; const char *inboxuser; if (domainlen) snprintf(inbox, sizeof(inbox), "%s!", userid+userlen+1); if (rock->namespace->hier_sep == '/' && (p = strchr(userid, '.'))) { tmpuser = xmalloc(userlen); memcpy(tmpuser, userid, userlen); t = tmpuser + (p - userid); while(t < (tmpuser + userlen)) { if (*t == '.') *t = '^'; t++; } inboxuser = tmpuser; } else inboxuser = userid; snprintf(inbox+domainlen, sizeof(inbox)-domainlen, "user.%.*s.INBOX.", (int)userlen, inboxuser); free(tmpuser); inboxlen = strlen(inbox) - 7; } else { userid = 0; } /* Find the common search prefix of all patterns */ const char *firstpat = strarray_nth(patterns, 0); for (prefixlen = 0; firstpat[prefixlen]; prefixlen++) { if (prefixlen >= MAX_MAILBOX_NAME) { r = IMAP_MAILBOX_BADNAME; goto done; } char c = firstpat[prefixlen]; for (i = 1; i < patterns->count; i++) { const char *pat = 
strarray_nth(patterns, i); if (pat[prefixlen] != c) break; } if (i < patterns->count) break; if (c == '*' || c == '%' || c == '?') break; commonpat[prefixlen] = c; } commonpat[prefixlen] = '\0'; if (patterns->count == 1) { /* Skip pattern which matches shared namespace prefix */ if (!strcmp(firstpat+prefixlen, "%")) rock->singlepercent = 2; /* output prefix regardless */ if (!strcmp(firstpat+prefixlen, "*%")) rock->singlepercent = 1; } /* * Personal (INBOX) namespace (only if not admin) */ if (userid && !isadmin) { /* first the INBOX */ rock->mb_category = MBNAME_INBOX; r = cyrusdb_forone(rock->db, inbox, inboxlen, &find_p, &find_cb, rock, NULL); if (r == CYRUSDB_DONE) r = 0; if (r) goto done; if (rock->namespace->isalt) { /* do exact INBOX subs before resetting the namebuffer */ rock->mb_category = MBNAME_INBOXSUB; r = cyrusdb_foreach(rock->db, inbox, inboxlen+7, &find_p, &find_cb, rock, NULL); if (r == CYRUSDB_DONE) r = 0; if (r) goto done; /* reset the the namebuffer */ r = (*rock->proc)(NULL, rock->procrock); if (r) goto done; } /* iterate through all the mailboxes under the user's inbox */ rock->mb_category = MBNAME_OWNER; r = cyrusdb_foreach(rock->db, inbox, inboxlen+1, &find_p, &find_cb, rock, NULL); if (r == CYRUSDB_DONE) r = 0; if (r) goto done; /* "Alt Prefix" folders */ if (rock->namespace->isalt) { /* reset the the namebuffer */ r = (*rock->proc)(NULL, rock->procrock); if (r) goto done; rock->mb_category = MBNAME_ALTINBOX; /* special case user.foo.INBOX. 
If we're singlepercent == 2, this could return DONE, in which case we don't need to foreach the rest of the altprefix space */ r = cyrusdb_forone(rock->db, inbox, inboxlen+6, &find_p, &find_cb, rock, NULL); if (r == CYRUSDB_DONE) goto skipalt; if (r) goto done; /* special case any other altprefix stuff */ rock->mb_category = MBNAME_ALTPREFIX; r = cyrusdb_foreach(rock->db, inbox, inboxlen+1, &find_p, &find_cb, rock, NULL); skipalt: /* we got a done, so skip out of the foreach early */ if (r == CYRUSDB_DONE) r = 0; if (r) goto done; } } /* * Other Users namespace * * If "Other Users*" can match pattern, search for those mailboxes next */ if (isadmin || rock->namespace->accessible[NAMESPACE_USER]) { len = strlen(rock->namespace->prefix[NAMESPACE_USER]); if (len) len--; // trailing separator if (!strncmp(rock->namespace->prefix[NAMESPACE_USER], commonpat, MIN(len, prefixlen))) { if (prefixlen <= len) { /* we match all users */ strlcpy(domainpat+domainlen, "user.", sizeof(domainpat)-domainlen); } else { /* just those in this prefix */ strlcpy(domainpat+domainlen, "user.", sizeof(domainpat)-domainlen); strlcpy(domainpat+domainlen+5, commonpat+len+1, sizeof(domainpat)-domainlen-5); } rock->mb_category = MBNAME_OTHERUSER; /* because of how domains work, with crossdomains or admin you can't prefix at all :( */ size_t thislen = (isadmin || crossdomains) ? 0 : strlen(domainpat); /* reset the the namebuffer */ r = (*rock->proc)(NULL, rock->procrock); if (r) goto done; r = mboxlist_find_category(rock, domainpat, thislen); if (r) goto done; } } /* * Shared namespace * * search for all remaining mailboxes. * just bother looking at the ones that have the same pattern prefix. 
*/ if (isadmin || rock->namespace->accessible[NAMESPACE_SHARED]) { len = strlen(rock->namespace->prefix[NAMESPACE_SHARED]); if (len) len--; // trailing separator if (!strncmp(rock->namespace->prefix[NAMESPACE_SHARED], commonpat, MIN(len, prefixlen))) { rock->mb_category = MBNAME_SHARED; /* reset the the namebuffer */ r = (*rock->proc)(NULL, rock->procrock); if (r) goto done; /* iterate through all the non-user folders on the server */ r = mboxlist_find_category(rock, domainpat, domainlen); if (r) goto done; } } /* finish with a reset call always */ r = (*rock->proc)(NULL, rock->procrock); done: for (i = 0; i < rock->globs.count; i++) { glob *g = ptrarray_nth(&rock->globs, i); glob_free(&g); } ptrarray_fini(&rock->globs); return r; } EXPORTED int mboxlist_findallmulti(struct namespace *namespace, const strarray_t *patterns, int isadmin, const char *userid, const struct auth_state *auth_state, findall_cb *proc, void *rock) { int r = 0; if (!namespace) namespace = mboxname_get_adminnamespace(); struct find_rock cbrock; memset(&cbrock, 0, sizeof(struct find_rock)); cbrock.auth_state = auth_state; cbrock.db = mbdb; cbrock.isadmin = isadmin; cbrock.namespace = namespace; cbrock.proc = proc; cbrock.procrock = rock; cbrock.userid = userid; if (userid) { const char *domp = strchr(userid, '@'); if (domp) cbrock.domain = domp + 1; } r = mboxlist_do_find(&cbrock, patterns); return r; } EXPORTED int mboxlist_findall(struct namespace *namespace, const char *pattern, int isadmin, const char *userid, const struct auth_state *auth_state, findall_cb *proc, void *rock) { strarray_t patterns = STRARRAY_INITIALIZER; strarray_append(&patterns, pattern); int r = mboxlist_findallmulti(namespace, &patterns, isadmin, userid, auth_state, proc, rock); strarray_fini(&patterns); return r; } EXPORTED int mboxlist_findone(struct namespace *namespace, const char *intname, int isadmin, const char *userid, const struct auth_state *auth_state, findall_cb *proc, void *rock) { int r = 0; if 
(!namespace) namespace = mboxname_get_adminnamespace(); struct find_rock cbrock; memset(&cbrock, 0, sizeof(struct find_rock)); cbrock.auth_state = auth_state; cbrock.db = mbdb; cbrock.isadmin = isadmin; cbrock.namespace = namespace; cbrock.proc = proc; cbrock.procrock = rock; cbrock.userid = userid; if (userid) { const char *domp = strchr(userid, '@'); if (domp) cbrock.domain = domp + 1; } mbname_t *mbname = mbname_from_intname(intname); glob *g = glob_init(mbname_extname(mbname, namespace, userid), namespace->hier_sep); ptrarray_append(&cbrock.globs, g); mbname_free(&mbname); r = cyrusdb_forone(cbrock.db, intname, strlen(intname), &find_p, &find_cb, &cbrock, NULL); glob_free(&g); ptrarray_fini(&cbrock.globs); return r; } static int exists_cb(const mbentry_t *mbentry __attribute__((unused)), void *rock) { int *exists = (int *)rock; *exists = 1; return CYRUSDB_DONE; /* one is enough */ } /* * Set all the resource quotas on, or create a quota root. */ EXPORTED int mboxlist_setquotas(const char *root, quota_t newquotas[QUOTA_NUMRESOURCES], int force) { struct quota q; int r; int res; struct txn *tid = NULL; struct mboxevent *mboxevents = NULL; struct mboxevent *quotachange_event = NULL; struct mboxevent *quotawithin_event = NULL; if (!root[0] || root[0] == '.' || strchr(root, '/') || strchr(root, '*') || strchr(root, '%') || strchr(root, '?')) { return IMAP_MAILBOX_BADNAME; } quota_init(&q, root); r = quota_read(&q, &tid, 1); if (!r) { int changed = 0; int underquota; /* has it changed? */ for (res = 0 ; res < QUOTA_NUMRESOURCES ; res++) { if (q.limits[res] != newquotas[res]) { underquota = 0; /* Prepare a QuotaChange event notification *now*. * * This is to ensure the QuotaChange is emitted before the * subsequent QuotaWithin (if the latter becomes applicable). 
*/ if (quotachange_event == NULL) { quotachange_event = mboxevent_enqueue(EVENT_QUOTA_CHANGE, &mboxevents); } /* prepare a QuotaWithin event notification if now under quota */ if (quota_is_overquota(&q, res, NULL) && (!quota_is_overquota(&q, res, newquotas) || newquotas[res] == -1)) { if (quotawithin_event == NULL) quotawithin_event = mboxevent_enqueue(EVENT_QUOTA_WITHIN, &mboxevents); underquota++; } q.limits[res] = newquotas[res]; changed++; mboxevent_extract_quota(quotachange_event, &q, res); if (underquota) mboxevent_extract_quota(quotawithin_event, &q, res); } } if (changed) { r = quota_write(&q, &tid); if (quotachange_event == NULL) { quotachange_event = mboxevent_enqueue(EVENT_QUOTA_CHANGE, &mboxevents); } for (res = 0; res < QUOTA_NUMRESOURCES; res++) { mboxevent_extract_quota(quotachange_event, &q, res); } } if (!r) quota_commit(&tid); goto done; } if (r != IMAP_QUOTAROOT_NONEXISTENT) goto done; if (config_virtdomains && root[strlen(root)-1] == '!') { /* domain quota */ } else { mbentry_t *mbentry = NULL; /* look for a top-level mailbox in the proposed quotaroot */ r = mboxlist_lookup(root, &mbentry, NULL); if (r) { if (!force && r == IMAP_MAILBOX_NONEXISTENT) { mboxlist_mboxtree(root, exists_cb, &force, MBOXTREE_SKIP_ROOT); } /* are we going to force the create anyway? 
*/ if (force) { r = 0; } } else if (mbentry->mbtype & (MBTYPE_REMOTE | MBTYPE_MOVING)) { /* Can't set quota on a remote mailbox */ r = IMAP_MAILBOX_NOTSUPPORTED; } mboxlist_entry_free(&mbentry); if (r) goto done; } /* safe against quota -f and other root change races */ r = quota_changelock(); if (r) goto done; /* initialise the quota */ memcpy(q.limits, newquotas, sizeof(q.limits)); r = quota_write(&q, &tid); if (r) goto done; /* prepare a QuotaChange event notification */ if (quotachange_event == NULL) quotachange_event = mboxevent_enqueue(EVENT_QUOTA_CHANGE, &mboxevents); for (res = 0; res < QUOTA_NUMRESOURCES; res++) { mboxevent_extract_quota(quotachange_event, &q, res); } quota_commit(&tid); /* recurse through mailboxes, setting the quota and finding * out the usage */ mboxlist_mboxtree(root, mboxlist_changequota, (void *)root, 0); quota_changelockrelease(); done: quota_free(&q); if (r && tid) quota_abort(&tid); if (!r) { sync_log_quota(root); /* send QuotaChange and QuotaWithin event notifications */ mboxevent_notify(&mboxevents); } mboxevent_freequeue(&mboxevents); return r; } /* * Remove a quota root */ EXPORTED int mboxlist_unsetquota(const char *root) { struct quota q; int r=0; if (!root[0] || root[0] == '.' 
|| strchr(root, '/') || strchr(root, '*') || strchr(root, '%') || strchr(root, '?')) { return IMAP_MAILBOX_BADNAME; } quota_init(&q, root); r = quota_read(&q, NULL, 0); /* already unset */ if (r == IMAP_QUOTAROOT_NONEXISTENT) { r = 0; goto done; } if (r) goto done; r = quota_changelock(); /* * Have to remove it from all affected mailboxes */ mboxlist_mboxtree(root, mboxlist_rmquota, (void *)root, /*flags*/0); r = quota_deleteroot(root); quota_changelockrelease(); if (!r) sync_log_quota(root); done: quota_free(&q); return r; } EXPORTED modseq_t mboxlist_foldermodseq_dirty(struct mailbox *mailbox) { mbentry_t *mbentry = NULL; modseq_t ret = 0; if (mboxlist_mylookup(mailbox->name, &mbentry, NULL, 0)) return 0; ret = mbentry->foldermodseq = mailbox_modseq_dirty(mailbox); mboxlist_update(mbentry, 0); mboxlist_entry_free(&mbentry); return ret; } /* * ACL access canonicalization routine which ensures that 'owner' * retains lookup, administer, and create rights over a mailbox. */ EXPORTED int mboxlist_ensureOwnerRights(void *rock, const char *identifier, int myrights) { char *owner = (char *)rock; if (strcmp(identifier, owner) != 0) return myrights; return myrights|config_implicitrights; } /* * Helper function to remove the quota root for 'name' */ static int mboxlist_rmquota(const mbentry_t *mbentry, void *rock) { int r = 0; struct mailbox *mailbox = NULL; const char *oldroot = (const char *) rock; assert(oldroot != NULL); r = mailbox_open_iwl(mbentry->name, &mailbox); if (r) goto done; if (mailbox->quotaroot) { if (strcmp(mailbox->quotaroot, oldroot)) { /* Part of a different quota root */ goto done; } r = mailbox_set_quotaroot(mailbox, NULL); } done: mailbox_close(&mailbox); if (r) { syslog(LOG_ERR, "LOSTQUOTA: unable to remove quota root %s for %s: %s", oldroot, mbentry->name, error_message(r)); } /* not a huge tragedy if we failed, so always return success */ return 0; } /* * Helper function to change the quota root for 'name' to that pointed * to by the static global 
struct pointer 'mboxlist_newquota'. */ static int mboxlist_changequota(const mbentry_t *mbentry, void *rock) { int r = 0; struct mailbox *mailbox = NULL; const char *root = (const char *) rock; int res; quota_t quota_usage[QUOTA_NUMRESOURCES]; assert(root); r = mailbox_open_iwl(mbentry->name, &mailbox); if (r) goto done; mailbox_get_usage(mailbox, quota_usage); if (mailbox->quotaroot) { quota_t quota_diff[QUOTA_NUMRESOURCES]; if (strlen(mailbox->quotaroot) >= strlen(root)) { /* Part of a child quota root - skip */ goto done; } /* remove usage from the old quotaroot */ for (res = 0; res < QUOTA_NUMRESOURCES ; res++) { quota_diff[res] = -quota_usage[res]; } r = quota_update_useds(mailbox->quotaroot, quota_diff, mailbox->name); } /* update (or set) the quotaroot */ r = mailbox_set_quotaroot(mailbox, root); if (r) goto done; /* update the new quota root */ r = quota_update_useds(root, quota_usage, mailbox->name); done: mailbox_close(&mailbox); if (r) { syslog(LOG_ERR, "LOSTQUOTA: unable to change quota root for %s to %s: %s", mbentry->name, root, error_message(r)); } /* Note, we're a callback, and it's not a huge tragedy if we * fail, so we don't ever return a failure */ return 0; } /* must be called after cyrus_init */ EXPORTED void mboxlist_init(int myflags) { if (myflags & MBOXLIST_SYNC) { cyrusdb_sync(DB); } } EXPORTED void mboxlist_open(const char *fname) { int ret, flags; char *tofree = NULL; if (!fname) fname = config_getstring(IMAPOPT_MBOXLIST_DB_PATH); /* create db file name */ if (!fname) { tofree = strconcat(config_dir, FNAME_MBOXLIST, (char *)NULL); fname = tofree; } flags = CYRUSDB_CREATE; if (config_getswitch(IMAPOPT_IMPROVED_MBOXLIST_SORT)) { flags |= CYRUSDB_MBOXSORT; } ret = cyrusdb_open(DB, fname, flags, &mbdb); if (ret != 0) { syslog(LOG_ERR, "DBERROR: opening %s: %s", fname, cyrusdb_strerror(ret)); /* Exiting TEMPFAIL because Sendmail thinks this EC_OSFILE == permanent failure. 
*/
        fatal("can't read mailboxes file", EC_TEMPFAIL);
    }

    free(tofree);

    mboxlist_dbopen = 1;
}

/*
 * Close the global mailboxes database if it is open; errors from the
 * backend are logged but otherwise ignored.
 */
EXPORTED void mboxlist_close(void)
{
    int r;

    if (mboxlist_dbopen) {
        r = cyrusdb_close(mbdb);
        if (r) {
            syslog(LOG_ERR, "DBERROR: error closing mailboxes: %s",
                   cyrusdb_strerror(r));
        }
        mboxlist_dbopen = 0;
    }
}

EXPORTED void mboxlist_done(void)
{
    /* DB->done() handled by cyrus_done() */
}

/*
 * Open the subscription list for 'userid'.
 *
 * On success, returns zero and stores the open handle in *ret.
 * On failure, returns an error code (the DB error is mapped to
 * IMAP_IOERROR).
 */
static int mboxlist_opensubs(const char *userid, struct db **ret)
{
    int r = 0, flags;
    char *subsfname;

    /* Build subscription list filename */
    subsfname = user_hash_subs(userid);

    flags = CYRUSDB_CREATE;
    if (config_getswitch(IMAPOPT_IMPROVED_MBOXLIST_SORT)) {
        flags |= CYRUSDB_MBOXSORT;
    }

    r = cyrusdb_open(SUBDB, subsfname, flags, ret);
    if (r != CYRUSDB_OK) {
        r = IMAP_IOERROR;
    }
    free(subsfname);

    return r;
}

/*
 * Close a subscription file
 */
static void mboxlist_closesubs(struct db *sub)
{
    cyrusdb_close(sub);
}

/*
 * Find subscribed mailboxes that match 'pattern'.
 * 'isadmin' is nonzero if user is a mailbox admin.  'userid'
 * is the user's login id.  For each matching mailbox, calls
 * 'proc' with the name of the mailbox.
*/
EXPORTED int mboxlist_findsubmulti(struct namespace *namespace,
                                   const strarray_t *patterns, int isadmin,
                                   const char *userid,
                                   const struct auth_state *auth_state,
                                   findall_cb *proc, void *rock,
                                   int force)
{
    int r = 0;

    /* fall back to the admin namespace when none was supplied */
    if (!namespace) namespace = mboxname_get_adminnamespace();

    struct find_rock cbrock;
    memset(&cbrock, 0, sizeof(struct find_rock));

    /* open the subscription file that contains the mailboxes the
       user is subscribed to */
    struct db *subs = NULL;
    r = mboxlist_opensubs(userid, &subs);
    if (r) return r;

    cbrock.auth_state = auth_state;
    /* 'force' skips the existence check against the mailboxes list */
    cbrock.checkmboxlist = !force;
    cbrock.db = subs;
    cbrock.isadmin = isadmin;
    cbrock.issubs = 1;
    cbrock.namespace = namespace;
    cbrock.proc = proc;
    cbrock.procrock = rock;
    cbrock.userid = userid;
    if (userid) {
        const char *domp = strchr(userid, '@');
        if (domp) cbrock.domain = domp + 1;
    }

    r = mboxlist_do_find(&cbrock, patterns);

    mboxlist_closesubs(subs);

    return r;
}

/*
 * Single-pattern convenience wrapper around mboxlist_findsubmulti().
 */
EXPORTED int mboxlist_findsub(struct namespace *namespace,
                              const char *pattern, int isadmin,
                              const char *userid,
                              const struct auth_state *auth_state,
                              findall_cb *proc, void *rock,
                              int force)
{
    strarray_t patterns = STRARRAY_INITIALIZER;
    strarray_append(&patterns, pattern);

    int r = mboxlist_findsubmulti(namespace, &patterns, isadmin, userid,
                                  auth_state, proc, rock, force);

    strarray_fini(&patterns);

    return r;
}

/*
 * cyrusdb_foreach callback: append each subscription key to the
 * strarray passed in 'rock'.  Keys are not NUL-terminated in the DB,
 * hence the bounded copy.
 */
static int subsadd_cb(void *rock, const char *key, size_t keylen,
                      const char *val __attribute__((unused)),
                      size_t vallen __attribute__((unused)))
{
    strarray_t *list = (strarray_t *)rock;
    strarray_appendm(list, xstrndup(key, keylen));
    return 0;
}

/*
 * Return a newly-allocated strarray of the mailboxes 'userid' is
 * subscribed to, or NULL on error.  Caller frees with strarray_free().
 */
EXPORTED strarray_t *mboxlist_sublist(const char *userid)
{
    struct db *subs = NULL;
    strarray_t *list = strarray_new();
    int r;

    /* open subs DB */
    r = mboxlist_opensubs(userid, &subs);
    if (r) goto done;

    /* faster to do it all in a single slurp!
*/ r = cyrusdb_foreach(subs, "", 0, subsadd_cb, NULL, list, 0); mboxlist_closesubs(subs); done: if (r) { strarray_free(list); return NULL; } return list; } struct submb_rock { struct mboxlist_entry *mbentry; const char *userid; int flags; mboxlist_cb *proc; void *rock; }; static int usersubs_cb(void *rock, const char *key, size_t keylen, const char *data __attribute__((unused)), size_t datalen __attribute__((unused))) { struct submb_rock *mbrock = (struct submb_rock *) rock; char mboxname[MAX_MAILBOX_NAME+1]; int r; /* free previous record */ mboxlist_entry_free(&mbrock->mbentry); snprintf(mboxname, MAX_MAILBOX_NAME, "%.*s", (int) keylen, key); if ((mbrock->flags & MBOXTREE_SKIP_PERSONAL) && mboxname_userownsmailbox(mbrock->userid, mboxname)) return 0; r = mboxlist_lookup(mboxname, &mbrock->mbentry, NULL); if (r) { syslog(LOG_INFO, "mboxlist_lookup(%s) failed: %s", mboxname, error_message(r)); return r; } return mbrock->proc(mbrock->mbentry, mbrock->rock); } EXPORTED int mboxlist_usersubs(const char *userid, mboxlist_cb *proc, void *rock, int flags) { struct db *subs = NULL; struct submb_rock mbrock = { NULL, userid, flags, proc, rock }; int r = 0; /* open subs DB */ r = mboxlist_opensubs(userid, &subs); if (r) return r; /* faster to do it all in a single slurp! */ r = cyrusdb_foreach(subs, "", 0, NULL, usersubs_cb, &mbrock, 0); mboxlist_entry_free(&mbrock.mbentry); mboxlist_closesubs(subs); return r; } /* returns CYRUSDB_NOTFOUND if the folder doesn't exist, and 0 if it does! */ EXPORTED int mboxlist_checksub(const char *name, const char *userid) { int r; struct db *subs; const char *val; size_t vallen; r = mboxlist_opensubs(userid, &subs); if (!r) r = cyrusdb_fetch(subs, name, strlen(name), &val, &vallen, NULL); mboxlist_closesubs(subs); return r; } /* * Change 'user's subscription status for mailbox 'name'. * Subscribes if 'add' is nonzero, unsubscribes otherwise. * if 'force' is set, force the subscription through even if * we don't know about 'name'. 
*/
EXPORTED int mboxlist_changesub(const char *name, const char *userid,
                                const struct auth_state *auth_state,
                                int add, int force, int notify)
{
    mbentry_t *mbentry = NULL;
    int r;
    struct db *subs;
    struct mboxevent *mboxevent;

    if ((r = mboxlist_opensubs(userid, &subs)) != 0) {
        return r;
    }

    if (add && !force) {
        /* Ensure mailbox exists and can be seen by user */
        if ((r = mboxlist_lookup(name, &mbentry, NULL))!=0) {
            mboxlist_closesubs(subs);
            return r;
        }
        /* hide the mailbox's existence from users without lookup rights */
        if ((cyrus_acl_myrights(auth_state, mbentry->acl) & ACL_LOOKUP) == 0) {
            mboxlist_closesubs(subs);
            mboxlist_entry_free(&mbentry);
            return IMAP_MAILBOX_NONEXISTENT;
        }
    }

    if (add) {
        r = cyrusdb_store(subs, name, strlen(name), "", 0, NULL);
    }
    else {
        r = cyrusdb_delete(subs, name, strlen(name), NULL, 0);
        /* if it didn't exist, that's ok */
        /* NOTE(review): the comment says "didn't exist", which would be
         * CYRUSDB_NOTFOUND; checking CYRUSDB_EXISTS here looks wrong for
         * a delete — confirm against the cyrusdb backend's return codes. */
        if (r == CYRUSDB_EXISTS) r = CYRUSDB_OK;
    }

    /* map any backend error onto IMAP_IOERROR */
    switch (r) {
    case CYRUSDB_OK:
        r = 0;
        break;
    default:
        r = IMAP_IOERROR;
        break;
    }

    /* NOTE(review): the sync log entry is written even when the store or
     * delete above failed — presumably harmless, but verify this is
     * intentional. */
    sync_log_subscribe(userid, name);
    mboxlist_closesubs(subs);
    mboxlist_entry_free(&mbentry);

    /* prepare a MailboxSubscribe or MailboxUnSubscribe event notification */
    if (notify && r == 0) {
        mboxevent = mboxevent_new(add ? EVENT_MAILBOX_SUBSCRIBE :
                                        EVENT_MAILBOX_UNSUBSCRIBE);

        mboxevent_set_access(mboxevent, NULL, NULL, userid, name, 1);
        mboxevent_notify(&mboxevent);
        mboxevent_free(&mboxevent);
    }

    return r;
}

/* Transaction Handlers */
EXPORTED int mboxlist_commit(struct txn *tid)
{
    assert(tid);

    return cyrusdb_commit(mbdb, tid);
}

int mboxlist_abort(struct txn *tid)
{
    assert(tid);

    return cyrusdb_abort(mbdb, tid);
}

/* nonzero when delete_mode is configured as "delayed" */
EXPORTED int mboxlist_delayed_delete_isenabled(void)
{
    enum enum_value config_delete_mode = config_getenum(IMAPOPT_DELETE_MODE);

    return(config_delete_mode == IMAP_ENUM_DELETE_MODE_DELAYED);
}
./CrossVul/dataset_final_sorted/CWE-20/c/good_2789_0
crossvul-cpp_data_good_5600_0
/* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include <linux/stddef.h> #include <linux/errno.h> #include <linux/gfp.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/bio.h> #include <linux/sysctl.h> #include <linux/proc_fs.h> #include <linux/workqueue.h> #include <linux/percpu.h> #include <linux/blkdev.h> #include <linux/hash.h> #include <linux/kthread.h> #include <linux/migrate.h> #include <linux/backing-dev.h> #include <linux/freezer.h> #include "xfs_sb.h" #include "xfs_log.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_trace.h" static kmem_zone_t *xfs_buf_zone; static struct workqueue_struct *xfslogd_workqueue; #ifdef XFS_BUF_LOCK_TRACKING # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) # define XB_GET_OWNER(bp) ((bp)->b_last_holder) #else # define XB_SET_OWNER(bp) do { } while (0) # define XB_CLEAR_OWNER(bp) do { } while (0) # define XB_GET_OWNER(bp) do { } while (0) #endif #define xb_to_gfp(flags) \ ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN) static inline int xfs_buf_is_vmapped( struct xfs_buf *bp) { /* * Return true if the buffer is vmapped. 
 *
 * b_addr is null if the buffer is not mapped, but the code is clever
 * enough to know it doesn't have to map a single page, so the check has
 * to be both for b_addr and bp->b_page_count > 1.
 */
	return bp->b_addr && bp->b_page_count > 1;
}

/* number of mapped bytes, excluding the leading in-page offset */
static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		/* extra hold owned by the LRU; dropped by the shrinker */
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
		bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are not
 * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
 * to optimise the shrinker removing the buffer from the LRU and calling
 * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	/* unlocked fast path; re-checked under the lock below */
	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
*/ void xfs_buf_stale( struct xfs_buf *bp) { ASSERT(xfs_buf_islocked(bp)); bp->b_flags |= XBF_STALE; /* * Clear the delwri status so that a delwri queue walker will not * flush this buffer to disk now that it is stale. The delwri queue has * a reference to the buffer, so this is safe to do. */ bp->b_flags &= ~_XBF_DELWRI_Q; atomic_set(&(bp)->b_lru_ref, 0); if (!list_empty(&bp->b_lru)) { struct xfs_buftarg *btp = bp->b_target; spin_lock(&btp->bt_lru_lock); if (!list_empty(&bp->b_lru) && !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) { list_del_init(&bp->b_lru); btp->bt_lru_nr--; atomic_dec(&bp->b_hold); } spin_unlock(&btp->bt_lru_lock); } ASSERT(atomic_read(&bp->b_hold) >= 1); } static int xfs_buf_get_maps( struct xfs_buf *bp, int map_count) { ASSERT(bp->b_maps == NULL); bp->b_map_count = map_count; if (map_count == 1) { bp->b_maps = &bp->__b_map; return 0; } bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), KM_NOFS); if (!bp->b_maps) return ENOMEM; return 0; } /* * Frees b_pages if it was allocated. */ static void xfs_buf_free_maps( struct xfs_buf *bp) { if (bp->b_maps != &bp->__b_map) { kmem_free(bp->b_maps); bp->b_maps = NULL; } } struct xfs_buf * _xfs_buf_alloc( struct xfs_buftarg *target, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags) { struct xfs_buf *bp; int error; int i; bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); if (unlikely(!bp)) return NULL; /* * We don't want certain flags to appear in b_flags unless they are * specifically set by later operations on the buffer. */ flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD); atomic_set(&bp->b_hold, 1); atomic_set(&bp->b_lru_ref, 1); init_completion(&bp->b_iowait); INIT_LIST_HEAD(&bp->b_lru); INIT_LIST_HEAD(&bp->b_list); RB_CLEAR_NODE(&bp->b_rbnode); sema_init(&bp->b_sema, 0); /* held, no waiters */ XB_SET_OWNER(bp); bp->b_target = target; bp->b_flags = flags; /* * Set length and io_length to the same value initially. 
* I/O routines should use io_length, which will be the same in * most cases but may be reset (e.g. XFS recovery). */ error = xfs_buf_get_maps(bp, nmaps); if (error) { kmem_zone_free(xfs_buf_zone, bp); return NULL; } bp->b_bn = map[0].bm_bn; bp->b_length = 0; for (i = 0; i < nmaps; i++) { bp->b_maps[i].bm_bn = map[i].bm_bn; bp->b_maps[i].bm_len = map[i].bm_len; bp->b_length += map[i].bm_len; } bp->b_io_length = bp->b_length; atomic_set(&bp->b_pin_count, 0); init_waitqueue_head(&bp->b_waiters); XFS_STATS_INC(xb_create); trace_xfs_buf_init(bp, _RET_IP_); return bp; } /* * Allocate a page array capable of holding a specified number * of pages, and point the page buf at it. */ STATIC int _xfs_buf_get_pages( xfs_buf_t *bp, int page_count, xfs_buf_flags_t flags) { /* Make sure that we have a page list */ if (bp->b_pages == NULL) { bp->b_page_count = page_count; if (page_count <= XB_PAGES) { bp->b_pages = bp->b_page_array; } else { bp->b_pages = kmem_alloc(sizeof(struct page *) * page_count, KM_NOFS); if (bp->b_pages == NULL) return -ENOMEM; } memset(bp->b_pages, 0, sizeof(struct page *) * page_count); } return 0; } /* * Frees b_pages if it was allocated. */ STATIC void _xfs_buf_free_pages( xfs_buf_t *bp) { if (bp->b_pages != bp->b_page_array) { kmem_free(bp->b_pages); bp->b_pages = NULL; } } /* * Releases the specified buffer. * * The modification state of any associated pages is left unchanged. 
* The buffer most not be on any hash - use xfs_buf_rele instead for * hashed and refcounted buffers */ void xfs_buf_free( xfs_buf_t *bp) { trace_xfs_buf_free(bp, _RET_IP_); ASSERT(list_empty(&bp->b_lru)); if (bp->b_flags & _XBF_PAGES) { uint i; if (xfs_buf_is_vmapped(bp)) vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count); for (i = 0; i < bp->b_page_count; i++) { struct page *page = bp->b_pages[i]; __free_page(page); } } else if (bp->b_flags & _XBF_KMEM) kmem_free(bp->b_addr); _xfs_buf_free_pages(bp); xfs_buf_free_maps(bp); kmem_zone_free(xfs_buf_zone, bp); } /* * Allocates all the pages for buffer in question and builds it's page list. */ STATIC int xfs_buf_allocate_memory( xfs_buf_t *bp, uint flags) { size_t size; size_t nbytes, offset; gfp_t gfp_mask = xb_to_gfp(flags); unsigned short page_count, i; xfs_off_t start, end; int error; /* * for buffers that are contained within a single page, just allocate * the memory from the heap - there's no need for the complexity of * page arrays to keep allocation down to order 0. 
*/ size = BBTOB(bp->b_length); if (size < PAGE_SIZE) { bp->b_addr = kmem_alloc(size, KM_NOFS); if (!bp->b_addr) { /* low memory - use alloc_page loop instead */ goto use_alloc_page; } if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != ((unsigned long)bp->b_addr & PAGE_MASK)) { /* b_addr spans two pages - use alloc_page instead */ kmem_free(bp->b_addr); bp->b_addr = NULL; goto use_alloc_page; } bp->b_offset = offset_in_page(bp->b_addr); bp->b_pages = bp->b_page_array; bp->b_pages[0] = virt_to_page(bp->b_addr); bp->b_page_count = 1; bp->b_flags |= _XBF_KMEM; return 0; } use_alloc_page: start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT; page_count = end - start; error = _xfs_buf_get_pages(bp, page_count, flags); if (unlikely(error)) return error; offset = bp->b_offset; bp->b_flags |= _XBF_PAGES; for (i = 0; i < bp->b_page_count; i++) { struct page *page; uint retries = 0; retry: page = alloc_page(gfp_mask); if (unlikely(page == NULL)) { if (flags & XBF_READ_AHEAD) { bp->b_page_count = i; error = ENOMEM; goto out_free_pages; } /* * This could deadlock. * * But until all the XFS lowlevel code is revamped to * handle buffer allocation failures we can't do much. */ if (!(++retries % 100)) xfs_err(NULL, "possible memory allocation deadlock in %s (mode:0x%x)", __func__, gfp_mask); XFS_STATS_INC(xb_page_retries); congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; } XFS_STATS_INC(xb_page_found); nbytes = min_t(size_t, size, PAGE_SIZE - offset); size -= nbytes; bp->b_pages[i] = page; offset = 0; } return 0; out_free_pages: for (i = 0; i < bp->b_page_count; i++) __free_page(bp->b_pages[i]); return error; } /* * Map buffer into kernel address-space if necessary. 
*/ STATIC int _xfs_buf_map_pages( xfs_buf_t *bp, uint flags) { ASSERT(bp->b_flags & _XBF_PAGES); if (bp->b_page_count == 1) { /* A single page buffer is always mappable */ bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; } else if (flags & XBF_UNMAPPED) { bp->b_addr = NULL; } else { int retried = 0; do { bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, -1, PAGE_KERNEL); if (bp->b_addr) break; vm_unmap_aliases(); } while (retried++ <= 1); if (!bp->b_addr) return -ENOMEM; bp->b_addr += bp->b_offset; } return 0; } /* * Finding and Reading Buffers */ /* * Look up, and creates if absent, a lockable buffer for * a given range of an inode. The buffer is returned * locked. No I/O is implied by this call. */ xfs_buf_t * _xfs_buf_find( struct xfs_buftarg *btp, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags, xfs_buf_t *new_bp) { size_t numbytes; struct xfs_perag *pag; struct rb_node **rbp; struct rb_node *parent; xfs_buf_t *bp; xfs_daddr_t blkno = map[0].bm_bn; xfs_daddr_t eofs; int numblks = 0; int i; for (i = 0; i < nmaps; i++) numblks += map[i].bm_len; numbytes = BBTOB(numblks); /* Check for IOs smaller than the sector size / not sector aligned */ ASSERT(!(numbytes < (1 << btp->bt_sshift))); ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask)); /* * Corrupted block numbers can get through to here, unfortunately, so we * have to check that the buffer falls within the filesystem bounds. */ eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); if (blkno >= eofs) { /* * XXX (dgc): we should really be returning EFSCORRUPTED here, * but none of the higher level infrastructure supports * returning a specific error on buffer lookup failures. 
*/ xfs_alert(btp->bt_mount, "%s: Block out of range: block 0x%llx, EOFS 0x%llx ", __func__, blkno, eofs); return NULL; } /* get tree root */ pag = xfs_perag_get(btp->bt_mount, xfs_daddr_to_agno(btp->bt_mount, blkno)); /* walk tree */ spin_lock(&pag->pag_buf_lock); rbp = &pag->pag_buf_tree.rb_node; parent = NULL; bp = NULL; while (*rbp) { parent = *rbp; bp = rb_entry(parent, struct xfs_buf, b_rbnode); if (blkno < bp->b_bn) rbp = &(*rbp)->rb_left; else if (blkno > bp->b_bn) rbp = &(*rbp)->rb_right; else { /* * found a block number match. If the range doesn't * match, the only way this is allowed is if the buffer * in the cache is stale and the transaction that made * it stale has not yet committed. i.e. we are * reallocating a busy extent. Skip this buffer and * continue searching to the right for an exact match. */ if (bp->b_length != numblks) { ASSERT(bp->b_flags & XBF_STALE); rbp = &(*rbp)->rb_right; continue; } atomic_inc(&bp->b_hold); goto found; } } /* No match found */ if (new_bp) { rb_link_node(&new_bp->b_rbnode, parent, rbp); rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree); /* the buffer keeps the perag reference until it is freed */ new_bp->b_pag = pag; spin_unlock(&pag->pag_buf_lock); } else { XFS_STATS_INC(xb_miss_locked); spin_unlock(&pag->pag_buf_lock); xfs_perag_put(pag); } return new_bp; found: spin_unlock(&pag->pag_buf_lock); xfs_perag_put(pag); if (!xfs_buf_trylock(bp)) { if (flags & XBF_TRYLOCK) { xfs_buf_rele(bp); XFS_STATS_INC(xb_busy_locked); return NULL; } xfs_buf_lock(bp); XFS_STATS_INC(xb_get_locked_waited); } /* * if the buffer is stale, clear all the external state associated with * it. We need to keep flags such as how we allocated the buffer memory * intact here. 
*/ if (bp->b_flags & XBF_STALE) { ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); ASSERT(bp->b_iodone == NULL); bp->b_flags &= _XBF_KMEM | _XBF_PAGES; bp->b_ops = NULL; } trace_xfs_buf_find(bp, flags, _RET_IP_); XFS_STATS_INC(xb_get_locked); return bp; } /* * Assembles a buffer covering the specified range. The code is optimised for * cache hits, as metadata intensive workloads will see 3 orders of magnitude * more hits than misses. */ struct xfs_buf * xfs_buf_get_map( struct xfs_buftarg *target, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags) { struct xfs_buf *bp; struct xfs_buf *new_bp; int error = 0; bp = _xfs_buf_find(target, map, nmaps, flags, NULL); if (likely(bp)) goto found; new_bp = _xfs_buf_alloc(target, map, nmaps, flags); if (unlikely(!new_bp)) return NULL; error = xfs_buf_allocate_memory(new_bp, flags); if (error) { xfs_buf_free(new_bp); return NULL; } bp = _xfs_buf_find(target, map, nmaps, flags, new_bp); if (!bp) { xfs_buf_free(new_bp); return NULL; } if (bp != new_bp) xfs_buf_free(new_bp); found: if (!bp->b_addr) { error = _xfs_buf_map_pages(bp, flags); if (unlikely(error)) { xfs_warn(target->bt_mount, "%s: failed to map pages\n", __func__); xfs_buf_relse(bp); return NULL; } } XFS_STATS_INC(xb_get); trace_xfs_buf_get(bp, flags, _RET_IP_); return bp; } STATIC int _xfs_buf_read( xfs_buf_t *bp, xfs_buf_flags_t flags) { ASSERT(!(flags & XBF_WRITE)); ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); xfs_buf_iorequest(bp); if (flags & XBF_ASYNC) return 0; return xfs_buf_iowait(bp); } xfs_buf_t * xfs_buf_read_map( struct xfs_buftarg *target, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags, const struct xfs_buf_ops *ops) { struct xfs_buf *bp; flags |= XBF_READ; bp = xfs_buf_get_map(target, map, nmaps, flags); if (bp) { trace_xfs_buf_read(bp, flags, _RET_IP_); if (!XFS_BUF_ISDONE(bp)) { XFS_STATS_INC(xb_get_read); 
bp->b_ops = ops; _xfs_buf_read(bp, flags); } else if (flags & XBF_ASYNC) { /* * Read ahead call which is already satisfied, * drop the buffer */ xfs_buf_relse(bp); return NULL; } else { /* We do not want read in the flags */ bp->b_flags &= ~XBF_READ; } } return bp; } /* * If we are not low on memory then do the readahead in a deadlock * safe manner. */ void xfs_buf_readahead_map( struct xfs_buftarg *target, struct xfs_buf_map *map, int nmaps, const struct xfs_buf_ops *ops) { if (bdi_read_congested(target->bt_bdi)) return; xfs_buf_read_map(target, map, nmaps, XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops); } /* * Read an uncached buffer from disk. Allocates and returns a locked * buffer containing the disk contents or nothing. */ struct xfs_buf * xfs_buf_read_uncached( struct xfs_buftarg *target, xfs_daddr_t daddr, size_t numblks, int flags, const struct xfs_buf_ops *ops) { struct xfs_buf *bp; bp = xfs_buf_get_uncached(target, numblks, flags); if (!bp) return NULL; /* set up the buffer for a read IO */ ASSERT(bp->b_map_count == 1); bp->b_bn = daddr; bp->b_maps[0].bm_bn = daddr; bp->b_flags |= XBF_READ; bp->b_ops = ops; xfsbdstrat(target->bt_mount, bp); xfs_buf_iowait(bp); return bp; } /* * Return a buffer allocated as an empty buffer and associated to external * memory via xfs_buf_associate_memory() back to it's empty state. 
*/ void xfs_buf_set_empty( struct xfs_buf *bp, size_t numblks) { if (bp->b_pages) _xfs_buf_free_pages(bp); bp->b_pages = NULL; bp->b_page_count = 0; bp->b_addr = NULL; bp->b_length = numblks; bp->b_io_length = numblks; ASSERT(bp->b_map_count == 1); bp->b_bn = XFS_BUF_DADDR_NULL; bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL; bp->b_maps[0].bm_len = bp->b_length; } static inline struct page * mem_to_page( void *addr) { if ((!is_vmalloc_addr(addr))) { return virt_to_page(addr); } else { return vmalloc_to_page(addr); } } int xfs_buf_associate_memory( xfs_buf_t *bp, void *mem, size_t len) { int rval; int i = 0; unsigned long pageaddr; unsigned long offset; size_t buflen; int page_count; pageaddr = (unsigned long)mem & PAGE_MASK; offset = (unsigned long)mem - pageaddr; buflen = PAGE_ALIGN(len + offset); page_count = buflen >> PAGE_SHIFT; /* Free any previous set of page pointers */ if (bp->b_pages) _xfs_buf_free_pages(bp); bp->b_pages = NULL; bp->b_addr = mem; rval = _xfs_buf_get_pages(bp, page_count, 0); if (rval) return rval; bp->b_offset = offset; for (i = 0; i < bp->b_page_count; i++) { bp->b_pages[i] = mem_to_page((void *)pageaddr); pageaddr += PAGE_SIZE; } bp->b_io_length = BTOBB(len); bp->b_length = BTOBB(buflen); return 0; } xfs_buf_t * xfs_buf_get_uncached( struct xfs_buftarg *target, size_t numblks, int flags) { unsigned long page_count; int error, i; struct xfs_buf *bp; DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks); bp = _xfs_buf_alloc(target, &map, 1, 0); if (unlikely(bp == NULL)) goto fail; page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT; error = _xfs_buf_get_pages(bp, page_count, 0); if (error) goto fail_free_buf; for (i = 0; i < page_count; i++) { bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); if (!bp->b_pages[i]) goto fail_free_mem; } bp->b_flags |= _XBF_PAGES; error = _xfs_buf_map_pages(bp, 0); if (unlikely(error)) { xfs_warn(target->bt_mount, "%s: failed to map pages\n", __func__); goto fail_free_mem; } trace_xfs_buf_get_uncached(bp, 
_RET_IP_); return bp; fail_free_mem: while (--i >= 0) __free_page(bp->b_pages[i]); _xfs_buf_free_pages(bp); fail_free_buf: xfs_buf_free_maps(bp); kmem_zone_free(xfs_buf_zone, bp); fail: return NULL; } /* * Increment reference count on buffer, to hold the buffer concurrently * with another thread which may release (free) the buffer asynchronously. * Must hold the buffer already to call this function. */ void xfs_buf_hold( xfs_buf_t *bp) { trace_xfs_buf_hold(bp, _RET_IP_); atomic_inc(&bp->b_hold); } /* * Releases a hold on the specified buffer. If the * the hold count is 1, calls xfs_buf_free. */ void xfs_buf_rele( xfs_buf_t *bp) { struct xfs_perag *pag = bp->b_pag; trace_xfs_buf_rele(bp, _RET_IP_); if (!pag) { ASSERT(list_empty(&bp->b_lru)); ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); if (atomic_dec_and_test(&bp->b_hold)) xfs_buf_free(bp); return; } ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode)); ASSERT(atomic_read(&bp->b_hold) > 0); if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { xfs_buf_lru_add(bp); spin_unlock(&pag->pag_buf_lock); } else { xfs_buf_lru_del(bp); ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); spin_unlock(&pag->pag_buf_lock); xfs_perag_put(pag); xfs_buf_free(bp); } } } /* * Lock a buffer object, if it is not already locked. * * If we come across a stale, pinned, locked buffer, we know that we are * being asked to lock a buffer that has been reallocated. Because it is * pinned, we know that the log has not been pushed to disk and hence it * will still be locked. Rather than continuing to have trylock attempts * fail until someone else pushes the log, push it ourselves before * returning. This means that the xfsaild will not get stuck trying * to push on stale inode buffers. 
*/ int xfs_buf_trylock( struct xfs_buf *bp) { int locked; locked = down_trylock(&bp->b_sema) == 0; if (locked) XB_SET_OWNER(bp); else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) xfs_log_force(bp->b_target->bt_mount, 0); trace_xfs_buf_trylock(bp, _RET_IP_); return locked; } /* * Lock a buffer object. * * If we come across a stale, pinned, locked buffer, we know that we * are being asked to lock a buffer that has been reallocated. Because * it is pinned, we know that the log has not been pushed to disk and * hence it will still be locked. Rather than sleeping until someone * else pushes the log, push it ourselves before trying to get the lock. */ void xfs_buf_lock( struct xfs_buf *bp) { trace_xfs_buf_lock(bp, _RET_IP_); if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) xfs_log_force(bp->b_target->bt_mount, 0); down(&bp->b_sema); XB_SET_OWNER(bp); trace_xfs_buf_lock_done(bp, _RET_IP_); } void xfs_buf_unlock( struct xfs_buf *bp) { XB_CLEAR_OWNER(bp); up(&bp->b_sema); trace_xfs_buf_unlock(bp, _RET_IP_); } STATIC void xfs_buf_wait_unpin( xfs_buf_t *bp) { DECLARE_WAITQUEUE (wait, current); if (atomic_read(&bp->b_pin_count) == 0) return; add_wait_queue(&bp->b_waiters, &wait); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (atomic_read(&bp->b_pin_count) == 0) break; io_schedule(); } remove_wait_queue(&bp->b_waiters, &wait); set_current_state(TASK_RUNNING); } /* * Buffer Utility Routines */ STATIC void xfs_buf_iodone_work( struct work_struct *work) { struct xfs_buf *bp = container_of(work, xfs_buf_t, b_iodone_work); bool read = !!(bp->b_flags & XBF_READ); bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); if (read && bp->b_ops) bp->b_ops->verify_read(bp); if (bp->b_iodone) (*(bp->b_iodone))(bp); else if (bp->b_flags & XBF_ASYNC) xfs_buf_relse(bp); else { ASSERT(read && bp->b_ops); complete(&bp->b_iowait); } } void xfs_buf_ioend( struct xfs_buf *bp, int schedule) { bool read = !!(bp->b_flags & XBF_READ); trace_xfs_buf_iodone(bp, 
_RET_IP_); if (bp->b_error == 0) bp->b_flags |= XBF_DONE; if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) { if (schedule) { INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); queue_work(xfslogd_workqueue, &bp->b_iodone_work); } else { xfs_buf_iodone_work(&bp->b_iodone_work); } } else { bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); complete(&bp->b_iowait); } } void xfs_buf_ioerror( xfs_buf_t *bp, int error) { ASSERT(error >= 0 && error <= 0xffff); bp->b_error = (unsigned short)error; trace_xfs_buf_ioerror(bp, error, _RET_IP_); } void xfs_buf_ioerror_alert( struct xfs_buf *bp, const char *func) { xfs_alert(bp->b_target->bt_mount, "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d", (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length); } /* * Called when we want to stop a buffer from getting written or read. * We attach the EIO error, muck with its flags, and call xfs_buf_ioend * so that the proper iodone callbacks get called. */ STATIC int xfs_bioerror( xfs_buf_t *bp) { #ifdef XFSERRORDEBUG ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone); #endif /* * No need to wait until the buffer is unpinned, we aren't flushing it. */ xfs_buf_ioerror(bp, EIO); /* * We're calling xfs_buf_ioend, so delete XBF_DONE flag. */ XFS_BUF_UNREAD(bp); XFS_BUF_UNDONE(bp); xfs_buf_stale(bp); xfs_buf_ioend(bp, 0); return EIO; } /* * Same as xfs_bioerror, except that we are releasing the buffer * here ourselves, and avoiding the xfs_buf_ioend call. * This is meant for userdata errors; metadata bufs come with * iodone functions attached, so that we can track down errors. */ STATIC int xfs_bioerror_relse( struct xfs_buf *bp) { int64_t fl = bp->b_flags; /* * No need to wait until the buffer is unpinned. * We aren't flushing it. * * chunkhold expects B_DONE to be set, whether * we actually finish the I/O or not. We don't want to * change that interface. 
*/ XFS_BUF_UNREAD(bp); XFS_BUF_DONE(bp); xfs_buf_stale(bp); bp->b_iodone = NULL; if (!(fl & XBF_ASYNC)) { /* * Mark b_error and B_ERROR _both_. * Lot's of chunkcache code assumes that. * There's no reason to mark error for * ASYNC buffers. */ xfs_buf_ioerror(bp, EIO); complete(&bp->b_iowait); } else { xfs_buf_relse(bp); } return EIO; } STATIC int xfs_bdstrat_cb( struct xfs_buf *bp) { if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { trace_xfs_bdstrat_shut(bp, _RET_IP_); /* * Metadata write that didn't get logged but * written delayed anyway. These aren't associated * with a transaction, and can be ignored. */ if (!bp->b_iodone && !XFS_BUF_ISREAD(bp)) return xfs_bioerror_relse(bp); else return xfs_bioerror(bp); } xfs_buf_iorequest(bp); return 0; } int xfs_bwrite( struct xfs_buf *bp) { int error; ASSERT(xfs_buf_islocked(bp)); bp->b_flags |= XBF_WRITE; bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); xfs_bdstrat_cb(bp); error = xfs_buf_iowait(bp); if (error) { xfs_force_shutdown(bp->b_target->bt_mount, SHUTDOWN_META_IO_ERROR); } return error; } /* * Wrapper around bdstrat so that we can stop data from going to disk in case * we are shutting down the filesystem. Typically user data goes thru this * path; one of the exceptions is the superblock. */ void xfsbdstrat( struct xfs_mount *mp, struct xfs_buf *bp) { if (XFS_FORCED_SHUTDOWN(mp)) { trace_xfs_bdstrat_shut(bp, _RET_IP_); xfs_bioerror_relse(bp); return; } xfs_buf_iorequest(bp); } STATIC void _xfs_buf_ioend( xfs_buf_t *bp, int schedule) { if (atomic_dec_and_test(&bp->b_io_remaining) == 1) xfs_buf_ioend(bp, schedule); } STATIC void xfs_buf_bio_end_io( struct bio *bio, int error) { xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; /* * don't overwrite existing errors - otherwise we can lose errors on * buffers that require multiple bios to complete. 
*/ if (!bp->b_error) xfs_buf_ioerror(bp, -error); if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); _xfs_buf_ioend(bp, 1); bio_put(bio); } static void xfs_buf_ioapply_map( struct xfs_buf *bp, int map, int *buf_offset, int *count, int rw) { int page_index; int total_nr_pages = bp->b_page_count; int nr_pages; struct bio *bio; sector_t sector = bp->b_maps[map].bm_bn; int size; int offset; total_nr_pages = bp->b_page_count; /* skip the pages in the buffer before the start offset */ page_index = 0; offset = *buf_offset; while (offset >= PAGE_SIZE) { page_index++; offset -= PAGE_SIZE; } /* * Limit the IO size to the length of the current vector, and update the * remaining IO count for the next time around. */ size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); *count -= size; *buf_offset += size; next_chunk: atomic_inc(&bp->b_io_remaining); nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); if (nr_pages > total_nr_pages) nr_pages = total_nr_pages; bio = bio_alloc(GFP_NOIO, nr_pages); bio->bi_bdev = bp->b_target->bt_bdev; bio->bi_sector = sector; bio->bi_end_io = xfs_buf_bio_end_io; bio->bi_private = bp; for (; size && nr_pages; nr_pages--, page_index++) { int rbytes, nbytes = PAGE_SIZE - offset; if (nbytes > size) nbytes = size; rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, offset); if (rbytes < nbytes) break; offset = 0; sector += BTOBB(nbytes); size -= nbytes; total_nr_pages--; } if (likely(bio->bi_size)) { if (xfs_buf_is_vmapped(bp)) { flush_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); } submit_bio(rw, bio); if (size) goto next_chunk; } else { /* * This is guaranteed not to be the last io reference count * because the caller (xfs_buf_iorequest) holds a count itself. 
*/ atomic_dec(&bp->b_io_remaining); xfs_buf_ioerror(bp, EIO); bio_put(bio); } } STATIC void _xfs_buf_ioapply( struct xfs_buf *bp) { struct blk_plug plug; int rw; int offset; int size; int i; if (bp->b_flags & XBF_WRITE) { if (bp->b_flags & XBF_SYNCIO) rw = WRITE_SYNC; else rw = WRITE; if (bp->b_flags & XBF_FUA) rw |= REQ_FUA; if (bp->b_flags & XBF_FLUSH) rw |= REQ_FLUSH; /* * Run the write verifier callback function if it exists. If * this function fails it will mark the buffer with an error and * the IO should not be dispatched. */ if (bp->b_ops) { bp->b_ops->verify_write(bp); if (bp->b_error) { xfs_force_shutdown(bp->b_target->bt_mount, SHUTDOWN_CORRUPT_INCORE); return; } } } else if (bp->b_flags & XBF_READ_AHEAD) { rw = READA; } else { rw = READ; } /* we only use the buffer cache for meta-data */ rw |= REQ_META; /* * Walk all the vectors issuing IO on them. Set up the initial offset * into the buffer and the desired IO size before we start - * _xfs_buf_ioapply_vec() will modify them appropriately for each * subsequent call. */ offset = bp->b_offset; size = BBTOB(bp->b_io_length); blk_start_plug(&plug); for (i = 0; i < bp->b_map_count; i++) { xfs_buf_ioapply_map(bp, i, &offset, &size, rw); if (bp->b_error) break; if (size <= 0) break; /* all done */ } blk_finish_plug(&plug); } void xfs_buf_iorequest( xfs_buf_t *bp) { trace_xfs_buf_iorequest(bp, _RET_IP_); ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); if (bp->b_flags & XBF_WRITE) xfs_buf_wait_unpin(bp); xfs_buf_hold(bp); /* Set the count to 1 initially, this will stop an I/O * completion callout which happens before we have started * all the I/O from calling xfs_buf_ioend too early. */ atomic_set(&bp->b_io_remaining, 1); _xfs_buf_ioapply(bp); _xfs_buf_ioend(bp, 1); xfs_buf_rele(bp); } /* * Waits for I/O to complete on the buffer supplied. It returns immediately if * no I/O is pending or there is already a pending error on the buffer. It * returns the I/O error code, if any, or 0 if there was no error. 
*/ int xfs_buf_iowait( xfs_buf_t *bp) { trace_xfs_buf_iowait(bp, _RET_IP_); if (!bp->b_error) wait_for_completion(&bp->b_iowait); trace_xfs_buf_iowait_done(bp, _RET_IP_); return bp->b_error; } xfs_caddr_t xfs_buf_offset( xfs_buf_t *bp, size_t offset) { struct page *page; if (bp->b_addr) return bp->b_addr + offset; offset += bp->b_offset; page = bp->b_pages[offset >> PAGE_SHIFT]; return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1)); } /* * Move data into or out of a buffer. */ void xfs_buf_iomove( xfs_buf_t *bp, /* buffer to process */ size_t boff, /* starting buffer offset */ size_t bsize, /* length to copy */ void *data, /* data address */ xfs_buf_rw_t mode) /* read/write/zero flag */ { size_t bend; bend = boff + bsize; while (boff < bend) { struct page *page; int page_index, page_offset, csize; page_index = (boff + bp->b_offset) >> PAGE_SHIFT; page_offset = (boff + bp->b_offset) & ~PAGE_MASK; page = bp->b_pages[page_index]; csize = min_t(size_t, PAGE_SIZE - page_offset, BBTOB(bp->b_io_length) - boff); ASSERT((csize + page_offset) <= PAGE_SIZE); switch (mode) { case XBRW_ZERO: memset(page_address(page) + page_offset, 0, csize); break; case XBRW_READ: memcpy(data, page_address(page) + page_offset, csize); break; case XBRW_WRITE: memcpy(page_address(page) + page_offset, data, csize); } boff += csize; data += csize; } } /* * Handling of buffer targets (buftargs). */ /* * Wait for any bufs with callbacks that have been submitted but have not yet * returned. These buffers will have an elevated hold count, so wait on those * while freeing all the buffers only held by the LRU. 
*/ void xfs_wait_buftarg( struct xfs_buftarg *btp) { struct xfs_buf *bp; restart: spin_lock(&btp->bt_lru_lock); while (!list_empty(&btp->bt_lru)) { bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); if (atomic_read(&bp->b_hold) > 1) { spin_unlock(&btp->bt_lru_lock); delay(100); goto restart; } /* * clear the LRU reference count so the buffer doesn't get * ignored in xfs_buf_rele(). */ atomic_set(&bp->b_lru_ref, 0); spin_unlock(&btp->bt_lru_lock); xfs_buf_rele(bp); spin_lock(&btp->bt_lru_lock); } spin_unlock(&btp->bt_lru_lock); } int xfs_buftarg_shrink( struct shrinker *shrink, struct shrink_control *sc) { struct xfs_buftarg *btp = container_of(shrink, struct xfs_buftarg, bt_shrinker); struct xfs_buf *bp; int nr_to_scan = sc->nr_to_scan; LIST_HEAD(dispose); if (!nr_to_scan) return btp->bt_lru_nr; spin_lock(&btp->bt_lru_lock); while (!list_empty(&btp->bt_lru)) { if (nr_to_scan-- <= 0) break; bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); /* * Decrement the b_lru_ref count unless the value is already * zero. If the value is already zero, we need to reclaim the * buffer, otherwise it gets another trip through the LRU. */ if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { list_move_tail(&bp->b_lru, &btp->bt_lru); continue; } /* * remove the buffer from the LRU now to avoid needing another * lock round trip inside xfs_buf_rele(). 
*/ list_move(&bp->b_lru, &dispose); btp->bt_lru_nr--; bp->b_lru_flags |= _XBF_LRU_DISPOSE; } spin_unlock(&btp->bt_lru_lock); while (!list_empty(&dispose)) { bp = list_first_entry(&dispose, struct xfs_buf, b_lru); list_del_init(&bp->b_lru); xfs_buf_rele(bp); } return btp->bt_lru_nr; } void xfs_free_buftarg( struct xfs_mount *mp, struct xfs_buftarg *btp) { unregister_shrinker(&btp->bt_shrinker); if (mp->m_flags & XFS_MOUNT_BARRIER) xfs_blkdev_issue_flush(btp); kmem_free(btp); } STATIC int xfs_setsize_buftarg_flags( xfs_buftarg_t *btp, unsigned int blocksize, unsigned int sectorsize, int verbose) { btp->bt_bsize = blocksize; btp->bt_sshift = ffs(sectorsize) - 1; btp->bt_smask = sectorsize - 1; if (set_blocksize(btp->bt_bdev, sectorsize)) { char name[BDEVNAME_SIZE]; bdevname(btp->bt_bdev, name); xfs_warn(btp->bt_mount, "Cannot set_blocksize to %u on device %s\n", sectorsize, name); return EINVAL; } return 0; } /* * When allocating the initial buffer target we have not yet * read in the superblock, so don't know what sized sectors * are being used is at this early stage. Play safe. 
*/ STATIC int xfs_setsize_buftarg_early( xfs_buftarg_t *btp, struct block_device *bdev) { return xfs_setsize_buftarg_flags(btp, PAGE_SIZE, bdev_logical_block_size(bdev), 0); } int xfs_setsize_buftarg( xfs_buftarg_t *btp, unsigned int blocksize, unsigned int sectorsize) { return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1); } xfs_buftarg_t * xfs_alloc_buftarg( struct xfs_mount *mp, struct block_device *bdev, int external, const char *fsname) { xfs_buftarg_t *btp; btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); btp->bt_mount = mp; btp->bt_dev = bdev->bd_dev; btp->bt_bdev = bdev; btp->bt_bdi = blk_get_backing_dev_info(bdev); if (!btp->bt_bdi) goto error; INIT_LIST_HEAD(&btp->bt_lru); spin_lock_init(&btp->bt_lru_lock); if (xfs_setsize_buftarg_early(btp, bdev)) goto error; btp->bt_shrinker.shrink = xfs_buftarg_shrink; btp->bt_shrinker.seeks = DEFAULT_SEEKS; register_shrinker(&btp->bt_shrinker); return btp; error: kmem_free(btp); return NULL; } /* * Add a buffer to the delayed write list. * * This queues a buffer for writeout if it hasn't already been. Note that * neither this routine nor the buffer list submission functions perform * any internal synchronization. It is expected that the lists are thread-local * to the callers. * * Returns true if we queued up the buffer, or false if it already had * been on the buffer list. */ bool xfs_buf_delwri_queue( struct xfs_buf *bp, struct list_head *list) { ASSERT(xfs_buf_islocked(bp)); ASSERT(!(bp->b_flags & XBF_READ)); /* * If the buffer is already marked delwri it already is queued up * by someone else for imediate writeout. Just ignore it in that * case. */ if (bp->b_flags & _XBF_DELWRI_Q) { trace_xfs_buf_delwri_queued(bp, _RET_IP_); return false; } trace_xfs_buf_delwri_queue(bp, _RET_IP_); /* * If a buffer gets written out synchronously or marked stale while it * is on a delwri list we lazily remove it. To do this, the other party * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone. 
* It remains referenced and on the list. In a rare corner case it * might get readded to a delwri list after the synchronous writeout, in * which case we need just need to re-add the flag here. */ bp->b_flags |= _XBF_DELWRI_Q; if (list_empty(&bp->b_list)) { atomic_inc(&bp->b_hold); list_add_tail(&bp->b_list, list); } return true; } /* * Compare function is more complex than it needs to be because * the return value is only 32 bits and we are doing comparisons * on 64 bit values */ static int xfs_buf_cmp( void *priv, struct list_head *a, struct list_head *b) { struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list); struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); xfs_daddr_t diff; diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; if (diff < 0) return -1; if (diff > 0) return 1; return 0; } static int __xfs_buf_delwri_submit( struct list_head *buffer_list, struct list_head *io_list, bool wait) { struct blk_plug plug; struct xfs_buf *bp, *n; int pinned = 0; list_for_each_entry_safe(bp, n, buffer_list, b_list) { if (!wait) { if (xfs_buf_ispinned(bp)) { pinned++; continue; } if (!xfs_buf_trylock(bp)) continue; } else { xfs_buf_lock(bp); } /* * Someone else might have written the buffer synchronously or * marked it stale in the meantime. In that case only the * _XBF_DELWRI_Q flag got cleared, and we have to drop the * reference and remove it from the list here. */ if (!(bp->b_flags & _XBF_DELWRI_Q)) { list_del_init(&bp->b_list); xfs_buf_relse(bp); continue; } list_move_tail(&bp->b_list, io_list); trace_xfs_buf_delwri_split(bp, _RET_IP_); } list_sort(NULL, io_list, xfs_buf_cmp); blk_start_plug(&plug); list_for_each_entry_safe(bp, n, io_list, b_list) { bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); bp->b_flags |= XBF_WRITE; if (!wait) { bp->b_flags |= XBF_ASYNC; list_del_init(&bp->b_list); } xfs_bdstrat_cb(bp); } blk_finish_plug(&plug); return pinned; } /* * Write out a buffer list asynchronously. 
* * This will take the @buffer_list, write all non-locked and non-pinned buffers * out and not wait for I/O completion on any of the buffers. This interface * is only safely useable for callers that can track I/O completion by higher * level means, e.g. AIL pushing as the @buffer_list is consumed in this * function. */ int xfs_buf_delwri_submit_nowait( struct list_head *buffer_list) { LIST_HEAD (io_list); return __xfs_buf_delwri_submit(buffer_list, &io_list, false); } /* * Write out a buffer list synchronously. * * This will take the @buffer_list, write all buffers out and wait for I/O * completion on all of the buffers. @buffer_list is consumed by the function, * so callers must have some other way of tracking buffers if they require such * functionality. */ int xfs_buf_delwri_submit( struct list_head *buffer_list) { LIST_HEAD (io_list); int error = 0, error2; struct xfs_buf *bp; __xfs_buf_delwri_submit(buffer_list, &io_list, true); /* Wait for IO to complete. */ while (!list_empty(&io_list)) { bp = list_first_entry(&io_list, struct xfs_buf, b_list); list_del_init(&bp->b_list); error2 = xfs_buf_iowait(bp); xfs_buf_relse(bp); if (!error) error = error2; } return error; } int __init xfs_buf_init(void) { xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf", KM_ZONE_HWALIGN, NULL); if (!xfs_buf_zone) goto out; xfslogd_workqueue = alloc_workqueue("xfslogd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 1); if (!xfslogd_workqueue) goto out_free_buf_zone; return 0; out_free_buf_zone: kmem_zone_destroy(xfs_buf_zone); out: return -ENOMEM; } void xfs_buf_terminate(void) { destroy_workqueue(xfslogd_workqueue); kmem_zone_destroy(xfs_buf_zone); }
./CrossVul/dataset_final_sorted/CWE-20/c/good_5600_0
crossvul-cpp_data_good_3661_0
/* * An implementation of key value pair (KVP) functionality for Linux. * * * Copyright (C) 2010, Novell, Inc. * Author : K. Y. Srinivasan <ksrinivasan@novell.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <sys/types.h> #include <sys/socket.h> #include <sys/poll.h> #include <sys/utsname.h> #include <linux/types.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <errno.h> #include <arpa/inet.h> #include <linux/connector.h> #include <linux/hyperv.h> #include <linux/netlink.h> #include <ifaddrs.h> #include <netdb.h> #include <syslog.h> #include <sys/stat.h> #include <fcntl.h> /* * KVP protocol: The user mode component first registers with the * the kernel component. Subsequently, the kernel component requests, data * for the specified keys. In response to this message the user mode component * fills in the value corresponding to the specified key. We overload the * sequence field in the cn_msg header to define our KVP message types. * * We use this infrastructure for also supporting queries from user mode * application for state that may be maintained in the KVP kernel component. 
* */ enum key_index { FullyQualifiedDomainName = 0, IntegrationServicesVersion, /*This key is serviced in the kernel*/ NetworkAddressIPv4, NetworkAddressIPv6, OSBuildNumber, OSName, OSMajorVersion, OSMinorVersion, OSVersion, ProcessorArchitecture }; static char kvp_send_buffer[4096]; static char kvp_recv_buffer[4096]; static struct sockaddr_nl addr; static char *os_name = ""; static char *os_major = ""; static char *os_minor = ""; static char *processor_arch; static char *os_build; static char *lic_version; static struct utsname uts_buf; #define MAX_FILE_NAME 100 #define ENTRIES_PER_BLOCK 50 struct kvp_record { __u8 key[HV_KVP_EXCHANGE_MAX_KEY_SIZE]; __u8 value[HV_KVP_EXCHANGE_MAX_VALUE_SIZE]; }; struct kvp_file_state { int fd; int num_blocks; struct kvp_record *records; int num_records; __u8 fname[MAX_FILE_NAME]; }; static struct kvp_file_state kvp_file_info[KVP_POOL_COUNT]; static void kvp_acquire_lock(int pool) { struct flock fl = {F_WRLCK, SEEK_SET, 0, 0, 0}; fl.l_pid = getpid(); if (fcntl(kvp_file_info[pool].fd, F_SETLKW, &fl) == -1) { syslog(LOG_ERR, "Failed to acquire the lock pool: %d", pool); exit(-1); } } static void kvp_release_lock(int pool) { struct flock fl = {F_UNLCK, SEEK_SET, 0, 0, 0}; fl.l_pid = getpid(); if (fcntl(kvp_file_info[pool].fd, F_SETLK, &fl) == -1) { perror("fcntl"); syslog(LOG_ERR, "Failed to release the lock pool: %d", pool); exit(-1); } } static void kvp_update_file(int pool) { FILE *filep; size_t bytes_written; /* * We are going to write our in-memory registry out to * disk; acquire the lock first. 
*/ kvp_acquire_lock(pool); filep = fopen(kvp_file_info[pool].fname, "w"); if (!filep) { kvp_release_lock(pool); syslog(LOG_ERR, "Failed to open file, pool: %d", pool); exit(-1); } bytes_written = fwrite(kvp_file_info[pool].records, sizeof(struct kvp_record), kvp_file_info[pool].num_records, filep); fflush(filep); kvp_release_lock(pool); } static void kvp_update_mem_state(int pool) { FILE *filep; size_t records_read = 0; struct kvp_record *record = kvp_file_info[pool].records; struct kvp_record *readp; int num_blocks = kvp_file_info[pool].num_blocks; int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; kvp_acquire_lock(pool); filep = fopen(kvp_file_info[pool].fname, "r"); if (!filep) { kvp_release_lock(pool); syslog(LOG_ERR, "Failed to open file, pool: %d", pool); exit(-1); } while (!feof(filep)) { readp = &record[records_read]; records_read += fread(readp, sizeof(struct kvp_record), ENTRIES_PER_BLOCK * num_blocks, filep); if (!feof(filep)) { /* * We have more data to read. */ num_blocks++; record = realloc(record, alloc_unit * num_blocks); if (record == NULL) { syslog(LOG_ERR, "malloc failed"); exit(-1); } continue; } break; } kvp_file_info[pool].num_blocks = num_blocks; kvp_file_info[pool].records = record; kvp_file_info[pool].num_records = records_read; kvp_release_lock(pool); } static int kvp_file_init(void) { int ret, fd; FILE *filep; size_t records_read; __u8 *fname; struct kvp_record *record; struct kvp_record *readp; int num_blocks; int i; int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK; if (access("/var/opt/hyperv", F_OK)) { if (mkdir("/var/opt/hyperv", S_IRUSR | S_IWUSR | S_IROTH)) { syslog(LOG_ERR, " Failed to create /var/opt/hyperv"); exit(-1); } } for (i = 0; i < KVP_POOL_COUNT; i++) { fname = kvp_file_info[i].fname; records_read = 0; num_blocks = 1; sprintf(fname, "/var/opt/hyperv/.kvp_pool_%d", i); fd = open(fname, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR | S_IROTH); if (fd == -1) return 1; filep = fopen(fname, "r"); if (!filep) 
return 1; record = malloc(alloc_unit * num_blocks); if (record == NULL) { fclose(filep); return 1; } while (!feof(filep)) { readp = &record[records_read]; records_read += fread(readp, sizeof(struct kvp_record), ENTRIES_PER_BLOCK, filep); if (!feof(filep)) { /* * We have more data to read. */ num_blocks++; record = realloc(record, alloc_unit * num_blocks); if (record == NULL) { fclose(filep); return 1; } continue; } break; } kvp_file_info[i].fd = fd; kvp_file_info[i].num_blocks = num_blocks; kvp_file_info[i].records = record; kvp_file_info[i].num_records = records_read; fclose(filep); } return 0; } static int kvp_key_delete(int pool, __u8 *key, int key_size) { int i; int j, k; int num_records; struct kvp_record *record; /* * First update the in-memory state. */ kvp_update_mem_state(pool); num_records = kvp_file_info[pool].num_records; record = kvp_file_info[pool].records; for (i = 0; i < num_records; i++) { if (memcmp(key, record[i].key, key_size)) continue; /* * Found a match; just move the remaining * entries up. */ if (i == num_records) { kvp_file_info[pool].num_records--; kvp_update_file(pool); return 0; } j = i; k = j + 1; for (; k < num_records; k++) { strcpy(record[j].key, record[k].key); strcpy(record[j].value, record[k].value); j++; } kvp_file_info[pool].num_records--; kvp_update_file(pool); return 0; } return 1; } static int kvp_key_add_or_modify(int pool, __u8 *key, int key_size, __u8 *value, int value_size) { int i; int j, k; int num_records; struct kvp_record *record; int num_blocks; if ((key_size > HV_KVP_EXCHANGE_MAX_KEY_SIZE) || (value_size > HV_KVP_EXCHANGE_MAX_VALUE_SIZE)) return 1; /* * First update the in-memory state. */ kvp_update_mem_state(pool); num_records = kvp_file_info[pool].num_records; record = kvp_file_info[pool].records; num_blocks = kvp_file_info[pool].num_blocks; for (i = 0; i < num_records; i++) { if (memcmp(key, record[i].key, key_size)) continue; /* * Found a match; just update the value - * this is the modify case. 
*/ memcpy(record[i].value, value, value_size); kvp_update_file(pool); return 0; } /* * Need to add a new entry; */ if (num_records == (ENTRIES_PER_BLOCK * num_blocks)) { /* Need to allocate a larger array for reg entries. */ record = realloc(record, sizeof(struct kvp_record) * ENTRIES_PER_BLOCK * (num_blocks + 1)); if (record == NULL) return 1; kvp_file_info[pool].num_blocks++; } memcpy(record[i].value, value, value_size); memcpy(record[i].key, key, key_size); kvp_file_info[pool].records = record; kvp_file_info[pool].num_records++; kvp_update_file(pool); return 0; } static int kvp_get_value(int pool, __u8 *key, int key_size, __u8 *value, int value_size) { int i; int num_records; struct kvp_record *record; if ((key_size > HV_KVP_EXCHANGE_MAX_KEY_SIZE) || (value_size > HV_KVP_EXCHANGE_MAX_VALUE_SIZE)) return 1; /* * First update the in-memory state. */ kvp_update_mem_state(pool); num_records = kvp_file_info[pool].num_records; record = kvp_file_info[pool].records; for (i = 0; i < num_records; i++) { if (memcmp(key, record[i].key, key_size)) continue; /* * Found a match; just copy the value out. */ memcpy(value, record[i].value, value_size); return 0; } return 1; } static void kvp_pool_enumerate(int pool, int index, __u8 *key, int key_size, __u8 *value, int value_size) { struct kvp_record *record; /* * First update our in-memory database. */ kvp_update_mem_state(pool); record = kvp_file_info[pool].records; if (index >= kvp_file_info[pool].num_records) { /* * This is an invalid index; terminate enumeration; * - a NULL value will do the trick. */ strcpy(value, ""); return; } memcpy(key, record[index].key, key_size); memcpy(value, record[index].value, value_size); } void kvp_get_os_info(void) { FILE *file; char *p, buf[512]; uname(&uts_buf); os_build = uts_buf.release; processor_arch = uts_buf.machine; /* * The current windows host (win7) expects the build * string to be of the form: x.y.z * Strip additional information we may have. 
*/ p = strchr(os_build, '-'); if (p) *p = '\0'; file = fopen("/etc/SuSE-release", "r"); if (file != NULL) goto kvp_osinfo_found; file = fopen("/etc/redhat-release", "r"); if (file != NULL) goto kvp_osinfo_found; /* * Add code for other supported platforms. */ /* * We don't have information about the os. */ os_name = uts_buf.sysname; return; kvp_osinfo_found: /* up to three lines */ p = fgets(buf, sizeof(buf), file); if (p) { p = strchr(buf, '\n'); if (p) *p = '\0'; p = strdup(buf); if (!p) goto done; os_name = p; /* second line */ p = fgets(buf, sizeof(buf), file); if (p) { p = strchr(buf, '\n'); if (p) *p = '\0'; p = strdup(buf); if (!p) goto done; os_major = p; /* third line */ p = fgets(buf, sizeof(buf), file); if (p) { p = strchr(buf, '\n'); if (p) *p = '\0'; p = strdup(buf); if (p) os_minor = p; } } } done: fclose(file); return; } static int kvp_get_ip_address(int family, char *buffer, int length) { struct ifaddrs *ifap; struct ifaddrs *curp; int ipv4_len = strlen("255.255.255.255") + 1; int ipv6_len = strlen("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")+1; int offset = 0; const char *str; char tmp[50]; int error = 0; /* * On entry into this function, the buffer is capable of holding the * maximum key value (2048 bytes). */ if (getifaddrs(&ifap)) { strcpy(buffer, "getifaddrs failed\n"); return 1; } curp = ifap; while (curp != NULL) { if ((curp->ifa_addr != NULL) && (curp->ifa_addr->sa_family == family)) { if (family == AF_INET) { struct sockaddr_in *addr = (struct sockaddr_in *) curp->ifa_addr; str = inet_ntop(family, &addr->sin_addr, tmp, 50); if (str == NULL) { strcpy(buffer, "inet_ntop failed\n"); error = 1; goto getaddr_done; } if (offset == 0) strcpy(buffer, tmp); else strcat(buffer, tmp); strcat(buffer, ";"); offset += strlen(str) + 1; if ((length - offset) < (ipv4_len + 1)) goto getaddr_done; } else { /* * We only support AF_INET and AF_INET6 * and the list of addresses is separated by a ";". 
*/ struct sockaddr_in6 *addr = (struct sockaddr_in6 *) curp->ifa_addr; str = inet_ntop(family, &addr->sin6_addr.s6_addr, tmp, 50); if (str == NULL) { strcpy(buffer, "inet_ntop failed\n"); error = 1; goto getaddr_done; } if (offset == 0) strcpy(buffer, tmp); else strcat(buffer, tmp); strcat(buffer, ";"); offset += strlen(str) + 1; if ((length - offset) < (ipv6_len + 1)) goto getaddr_done; } } curp = curp->ifa_next; } getaddr_done: freeifaddrs(ifap); return error; } static int kvp_get_domain_name(char *buffer, int length) { struct addrinfo hints, *info ; int error = 0; gethostname(buffer, length); memset(&hints, 0, sizeof(hints)); hints.ai_family = AF_INET; /*Get only ipv4 addrinfo. */ hints.ai_socktype = SOCK_STREAM; hints.ai_flags = AI_CANONNAME; error = getaddrinfo(buffer, NULL, &hints, &info); if (error != 0) { strcpy(buffer, "getaddrinfo failed\n"); return error; } strcpy(buffer, info->ai_canonname); freeaddrinfo(info); return error; } static int netlink_send(int fd, struct cn_msg *msg) { struct nlmsghdr *nlh; unsigned int size; struct msghdr message; char buffer[64]; struct iovec iov[2]; size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len); nlh = (struct nlmsghdr *)buffer; nlh->nlmsg_seq = 0; nlh->nlmsg_pid = getpid(); nlh->nlmsg_type = NLMSG_DONE; nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh)); nlh->nlmsg_flags = 0; iov[0].iov_base = nlh; iov[0].iov_len = sizeof(*nlh); iov[1].iov_base = msg; iov[1].iov_len = size; memset(&message, 0, sizeof(message)); message.msg_name = &addr; message.msg_namelen = sizeof(addr); message.msg_iov = iov; message.msg_iovlen = 2; return sendmsg(fd, &message, 0); } int main(void) { int fd, len, sock_opt; int error; struct cn_msg *message; struct pollfd pfd; struct nlmsghdr *incoming_msg; struct cn_msg *incoming_cn_msg; struct hv_kvp_msg *hv_msg; char *p; char *key_value; char *key_name; daemon(1, 0); openlog("KVP", 0, LOG_USER); syslog(LOG_INFO, "KVP starting; pid is:%d", getpid()); /* * Retrieve OS release information. 
*/ kvp_get_os_info(); if (kvp_file_init()) { syslog(LOG_ERR, "Failed to initialize the pools"); exit(-1); } fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); if (fd < 0) { syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd); exit(-1); } addr.nl_family = AF_NETLINK; addr.nl_pad = 0; addr.nl_pid = 0; addr.nl_groups = CN_KVP_IDX; error = bind(fd, (struct sockaddr *)&addr, sizeof(addr)); if (error < 0) { syslog(LOG_ERR, "bind failed; error:%d", error); close(fd); exit(-1); } sock_opt = addr.nl_groups; setsockopt(fd, 270, 1, &sock_opt, sizeof(sock_opt)); /* * Register ourselves with the kernel. */ message = (struct cn_msg *)kvp_send_buffer; message->id.idx = CN_KVP_IDX; message->id.val = CN_KVP_VAL; hv_msg = (struct hv_kvp_msg *)message->data; hv_msg->kvp_hdr.operation = KVP_OP_REGISTER; message->ack = 0; message->len = sizeof(struct hv_kvp_msg); len = netlink_send(fd, message); if (len < 0) { syslog(LOG_ERR, "netlink_send failed; error:%d", len); close(fd); exit(-1); } pfd.fd = fd; while (1) { struct sockaddr *addr_p = (struct sockaddr *) &addr; socklen_t addr_l = sizeof(addr); pfd.events = POLLIN; pfd.revents = 0; poll(&pfd, 1, -1); len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0, addr_p, &addr_l); if (len < 0 || addr.nl_pid) { syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s", addr.nl_pid, errno, strerror(errno)); close(fd); return -1; } incoming_msg = (struct nlmsghdr *)kvp_recv_buffer; incoming_cn_msg = (struct cn_msg *)NLMSG_DATA(incoming_msg); hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data; switch (hv_msg->kvp_hdr.operation) { case KVP_OP_REGISTER: /* * Driver is registering with us; stash away the version * information. 
*/ p = (char *)hv_msg->body.kvp_register.version; lic_version = malloc(strlen(p) + 1); if (lic_version) { strcpy(lic_version, p); syslog(LOG_INFO, "KVP LIC Version: %s", lic_version); } else { syslog(LOG_ERR, "malloc failed"); } continue; /* * The current protocol with the kernel component uses a * NULL key name to pass an error condition. * For the SET, GET and DELETE operations, * use the existing protocol to pass back error. */ case KVP_OP_SET: if (kvp_key_add_or_modify(hv_msg->kvp_hdr.pool, hv_msg->body.kvp_set.data.key, hv_msg->body.kvp_set.data.key_size, hv_msg->body.kvp_set.data.value, hv_msg->body.kvp_set.data.value_size)) strcpy(hv_msg->body.kvp_set.data.key, ""); break; case KVP_OP_GET: if (kvp_get_value(hv_msg->kvp_hdr.pool, hv_msg->body.kvp_set.data.key, hv_msg->body.kvp_set.data.key_size, hv_msg->body.kvp_set.data.value, hv_msg->body.kvp_set.data.value_size)) strcpy(hv_msg->body.kvp_set.data.key, ""); break; case KVP_OP_DELETE: if (kvp_key_delete(hv_msg->kvp_hdr.pool, hv_msg->body.kvp_delete.key, hv_msg->body.kvp_delete.key_size)) strcpy(hv_msg->body.kvp_delete.key, ""); break; default: break; } if (hv_msg->kvp_hdr.operation != KVP_OP_ENUMERATE) goto kvp_done; /* * If the pool is KVP_POOL_AUTO, dynamically generate * both the key and the value; if not read from the * appropriate pool. 
*/ if (hv_msg->kvp_hdr.pool != KVP_POOL_AUTO) { kvp_pool_enumerate(hv_msg->kvp_hdr.pool, hv_msg->body.kvp_enum_data.index, hv_msg->body.kvp_enum_data.data.key, HV_KVP_EXCHANGE_MAX_KEY_SIZE, hv_msg->body.kvp_enum_data.data.value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE); goto kvp_done; } hv_msg = (struct hv_kvp_msg *)incoming_cn_msg->data; key_name = (char *)hv_msg->body.kvp_enum_data.data.key; key_value = (char *)hv_msg->body.kvp_enum_data.data.value; switch (hv_msg->body.kvp_enum_data.index) { case FullyQualifiedDomainName: kvp_get_domain_name(key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE); strcpy(key_name, "FullyQualifiedDomainName"); break; case IntegrationServicesVersion: strcpy(key_name, "IntegrationServicesVersion"); strcpy(key_value, lic_version); break; case NetworkAddressIPv4: kvp_get_ip_address(AF_INET, key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE); strcpy(key_name, "NetworkAddressIPv4"); break; case NetworkAddressIPv6: kvp_get_ip_address(AF_INET6, key_value, HV_KVP_EXCHANGE_MAX_VALUE_SIZE); strcpy(key_name, "NetworkAddressIPv6"); break; case OSBuildNumber: strcpy(key_value, os_build); strcpy(key_name, "OSBuildNumber"); break; case OSName: strcpy(key_value, os_name); strcpy(key_name, "OSName"); break; case OSMajorVersion: strcpy(key_value, os_major); strcpy(key_name, "OSMajorVersion"); break; case OSMinorVersion: strcpy(key_value, os_minor); strcpy(key_name, "OSMinorVersion"); break; case OSVersion: strcpy(key_value, os_build); strcpy(key_name, "OSVersion"); break; case ProcessorArchitecture: strcpy(key_value, processor_arch); strcpy(key_name, "ProcessorArchitecture"); break; default: strcpy(key_value, "Unknown Key"); /* * We use a null key name to terminate enumeration. */ strcpy(key_name, ""); break; } /* * Send the value back to the kernel. The response is * already in the receive buffer. 
Update the cn_msg header to * reflect the key value that has been added to the message */ kvp_done: incoming_cn_msg->id.idx = CN_KVP_IDX; incoming_cn_msg->id.val = CN_KVP_VAL; incoming_cn_msg->ack = 0; incoming_cn_msg->len = sizeof(struct hv_kvp_msg); len = netlink_send(fd, incoming_cn_msg); if (len < 0) { syslog(LOG_ERR, "net_link send failed; error:%d", len); exit(-1); } } }
./CrossVul/dataset_final_sorted/CWE-20/c/good_3661_0
crossvul-cpp_data_bad_2753_0
/*
 * Generic flow dissector: walks a packet's headers (L2..L4) and fills a
 * caller-described set of "flow keys" (addresses, ports, protocol, VLAN,
 * tunnel key ids, ...).  Callers declare which keys they want via
 * skb_flow_dissector_init(); the fast path then writes each dissected key
 * at the caller-chosen offset inside an opaque target container.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>

/* Test whether @key_id was registered with this dissector. */
static bool skb_flow_dissector_uses_key(struct flow_dissector *flow_dissector,
					enum flow_dissector_key_id key_id)
{
	return flow_dissector->used_keys & (1 << key_id);
}

/* Mark @key_id as used; pairs with skb_flow_dissector_uses_key(). */
static void skb_flow_dissector_set_key(struct flow_dissector *flow_dissector,
				       enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}

/*
 * Resolve the address inside @target_container where the value for
 * @key_id must be written, using the per-key offset recorded at init time.
 */
static void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
				       enum flow_dissector_key_id key_id,
				       void *target_container)
{
	return ((char *) target_container) + flow_dissector->offset[key_id];
}

/*
 * skb_flow_dissector_init - register the set of keys a dissector extracts
 * @flow_dissector: dissector to (re)initialize; fully zeroed first
 * @key: array of {key_id, offset} pairs describing the target layout
 * @key_count: number of entries in @key
 *
 * Each key may be registered at most once, and CONTROL and BASIC keys are
 * mandatory so the fast path never has to test for their absence.
 */
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(skb_flow_dissector_uses_key(flow_dissector,
						   key->key_id));

		skb_flow_dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
					    FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
					    FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset.
 * Returns 0 when the protocol carries no ports or the header is truncated.
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	/* poff < 0 means the protocol has no port concept (e.g. ICMP). */
	if (poff >= 0) {
		__be32 *ports, _ports;

		/* Bounds-checked read: falls back to the _ports copy when the
		 * bytes are not linearly available.
		 */
		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * rest parameters.
 *
 * Caller must take care of zeroing target container memory.
 *
 * Returns false when a header lookup fails or the protocol cannot be
 * dissected further; the container may then be only partially filled.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_keyid *key_keyid;
	u8 ip_proto = 0;

	if (!data) {
		data = skb->data;
		proto = skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (skb_flow_dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		/* NOTE(review): eth_hdr() dereferences skb; this path assumes
		 * callers requesting ETH_ADDRS always pass a real skb — confirm.
		 */
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

again:
	/* L2/L3 dispatch; encapsulations loop back here with updated
	 * proto/nhoff until a terminal protocol is reached.
	 */
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		/* ihl < 5 would make the header shorter than its fixed part. */
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;
		/* Fragments (other than the first) carry no L4 header, so
		 * clear ip_proto to stop transport-layer dissection.
		 */
		if (ip_is_fragment(iph))
			ip_proto = 0;

		if (!skb_flow_dissector_uses_key(flow_dissector,
						 FLOW_DISSECTOR_KEY_IPV4_ADDRS))
			break;

		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						      target_container);
		/* saddr and daddr are adjacent in struct iphdr, so one copy
		 * fills both fields of v4addrs.
		 */
		memcpy(&key_addrs->v4addrs, &iph->saddr,
		       sizeof(key_addrs->v4addrs));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
		__be32 flow_label;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (skb_flow_dissector_uses_key(flow_dissector,
						FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;

			key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
								   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
								   target_container);

			/* src and dst are adjacent in struct ipv6hdr. */
			memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		flow_label = ip6_flowlabel(iph);
		if (flow_label) {
			if (skb_flow_dissector_uses_key(flow_dissector,
							FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
		}

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
		if (!vlan)
			return false;

		if (skb_flow_dissector_uses_key(flow_dissector,
						FLOW_DISSECTOR_KEY_VLANID)) {
			key_tags = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLANID,
							     target_container);

			/* NOTE(review): tag is read from skb metadata, not from
			 * the in-packet vlan header — assumes skb != NULL here.
			 */
			key_tags->vlan_id = skb_vlan_tag_get_id(skb);
		}

		/* Continue dissecting the encapsulated protocol. */
		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		/* Only IP/IPv6 over PPPoE can be dissected further. */
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		key_basic->n_proto = proto;
		key_control->thoff = (u16)nhoff;

		if (skb_flow_dissector_uses_key(flow_dissector,
						FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
							      target_container);
			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
		}
		/* TIPC has no ports/L4 to dissect; done. */
		return true;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC): {
		struct mpls_label *hdr, _hdr[2];
mpls:
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr)
			return false;

		if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
		     MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
			/* The label following the entropy-indicator label
			 * carries the entropy value itself.
			 */
			if (skb_flow_dissector_uses_key(flow_dissector,
							FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
								      target_container);
				key_keyid->keyid = hdr[1].entry &
					htonl(MPLS_LS_LABEL_MASK);
			}

			key_basic->n_proto = proto;
			key_basic->ip_proto = ip_proto;
			key_control->thoff = (u16)nhoff;

			return true;
		}

		return true;
	}

	case htons(ETH_P_FCOE):
		key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
		/* fall through */
	default:
		return false;
	}

ip_proto_again:
	/* IP-protocol dispatch; extension headers and tunnels loop back. */
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (hdr->flags & (GRE_VERSION | GRE_ROUTING))
			break;

		proto = hdr->proto;
		nhoff += 4;
		/* Skip optional fields as indicated by the flag bits. */
		if (hdr->flags & GRE_CSUM)
			nhoff += 4;
		if (hdr->flags & GRE_KEY) {
			const __be32 *keyid;
			__be32 _keyid;

			keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid),
						     data, hlen, &_keyid);

			if (!keyid)
				return false;

			if (skb_flow_dissector_uses_key(flow_dissector,
							FLOW_DISSECTOR_KEY_GRE_KEYID)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_GRE_KEYID,
								      target_container);
				key_keyid->keyid = *keyid;
			}
			nhoff += 4;
		}
		if (hdr->flags & GRE_SEQ)
			nhoff += 4;
		if (proto == htons(ETH_P_TEB)) {
			/* Transparent Ethernet Bridging: an inner Ethernet
			 * frame follows; step over it and re-dispatch.
			 */
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, nhoff,
						   sizeof(_eth),
						   data, hlen, &_eth);
			if (!eth)
				return false;
			proto = eth->h_proto;
			nhoff += sizeof(*eth);
		}
		goto again;
	}
	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		/* These next-header values only have this meaning for IPv6. */
		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr)
			return false;

		/* opthdr[0] = next header, opthdr[1] = length in 8-octet
		 * units not including the first 8 octets.
		 */
		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		goto ip_proto_again;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		goto mpls;
	default:
		break;
	}

	/* Record what we settled on, then optionally grab the L4 ports. */
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;
	key_control->thoff = (u16)nhoff;

	if (skb_flow_dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

	return true;
}
EXPORT_SYMBOL(__skb_flow_dissect);

/* Per-boot random seed for the flow hash; initialized lazily. */
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval)
{
	return jhash2(words, length, keyval);
}

/* Start of the region of struct flow_keys that is actually hashed. */
static inline void *flow_keys_hash_start(struct flow_keys *flow)
{
	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (void *)flow + FLOW_KEYS_HASH_OFFSET;
}

/*
 * Number of u32 words to hash: everything from the hash start through the
 * portion of the addrs union actually used by this flow's addr_type
 * (addrs must be the last member of struct flow_keys for this to work,
 * which the BUILD_BUG_ON enforces).
 */
static inline size_t flow_keys_hash_length(struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		diff -= sizeof(flow->addrs.tipcaddrs);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}

/* Collapse the flow's source address to one u32 (v6 is hashed down). */
__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		return flow->addrs.tipcaddrs.srcnode;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

/* Collapse the flow's destination address to one u32. */
__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);

/*
 * Canonicalize key ordering so both directions of a flow hash to the same
 * value: swap src/dst (addresses and ports together) when dst sorts below
 * src.
 */
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}

/* Hash canonicalized keys; never returns 0 (0 means "no valid hash"). */
static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);

/* Dissect @skb into @keys and hash with @keyval; 0 on dissection failure. */
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	if (!skb_flow_dissect_flow_keys(skb, keys))
		return 0;

	return __flow_hash_from_keys(keys, keyval);
}

/* Fixed on-wire-order layout used by make_flow_keys_digest(). */
struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

/*
 * Condense a full flow_keys into a small fixed-size digest (n_proto,
 * ip_proto, ports and the raw v4 address words — NOTE(review): v4addrs is
 * read regardless of addr_type, so for non-IPv4 flows these fields appear
 * to carry whatever the union holds; confirm against callers.
 */
void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
	    (struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);

/**
 * __skb_get_hash: calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, hashrnd);
	if (!hash)
		return;

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);

/* Like __skb_get_hash() but keyed by caller-supplied @perturb, not hashrnd. */
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);

/* Build flow keys directly from a flowi6 (no packet parsing) and hash. */
__u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));

	memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys.addrs.v6addrs.src));
	memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys.addrs.v6addrs.dst));
	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys.ports.src = fl6->fl6_sport;
	keys.ports.dst = fl6->fl6_dport;
	keys.keyid.keyid = fl6->fl6_gre_key;
	keys.tags.flow_label = (__force u32)fl6->flowlabel;
	keys.basic.ip_proto = fl6->flowi6_proto;

	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
			  flow_keys_have_l4(&keys));

	return skb->hash;
}
EXPORT_SYMBOL(__skb_get_hash_flowi6);

/* Build flow keys directly from a flowi4 (no packet parsing) and hash. */
__u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));

	keys.addrs.v4addrs.src = fl4->saddr;
	keys.addrs.v4addrs.dst = fl4->daddr;
	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys.ports.src = fl4->fl4_sport;
	keys.ports.dst = fl4->fl4_dport;
	keys.keyid.keyid = fl4->fl4_gre_key;
	keys.basic.ip_proto = fl4->flowi4_proto;

	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
			  flow_keys_have_l4(&keys));

	return skb->hash;
}
EXPORT_SYMBOL(__skb_get_hash_flowi4);

/*
 * Compute the payload offset from already-dissected @keys: start at the
 * transport header and step over the L4 header whose size is known (or,
 * for TCP, read from the data-offset field).
 */
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		/* doff lives in byte 12 of the TCP header. */
		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		/* Header length in bytes = doff (upper nibble) * 4; never
		 * report less than the minimum TCP header.
		 */
		poff += max_t(u32, sizeof(struct tcphdr),
			      (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could
 * be dissected.  The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}

/* Key layout for the default full dissector (fills struct flow_keys). */
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLANID,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

/* Minimal dissector for raw-buffer users: control + basic only. */
static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_buf_dissector __read_mostly;

/* Register the two default dissectors during late boot. */
static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_buf_dissector,
				flow_keys_buf_dissector_keys,
				ARRAY_SIZE(flow_keys_buf_dissector_keys));
	return 0;
}

late_initcall_sync(init_default_flow_dissectors);
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2753_0
crossvul-cpp_data_good_1221_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS V V GGGG % % SS V V G % % SSS V V G GG % % SS V V G G % % SSSSS V GGG % % % % % % Read/Write Scalable Vector Graphics Format % % % % Software Design % % Cristy % % William Radcliffe % % March 2000 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/constitute.h" #include "MagickCore/composite-private.h" #include "MagickCore/delegate.h" #include "MagickCore/delegate-private.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/xmlmemory.h> # include <libxml/parserInternals.h> # include <libxml/xmlerror.h> #endif #if defined(MAGICKCORE_AUTOTRACE_DELEGATE) #include "autotrace/autotrace.h" #endif #if defined(MAGICKCORE_RSVG_DELEGATE) #include "librsvg/rsvg.h" #if !defined(LIBRSVG_CHECK_VERSION) #include "librsvg/rsvg-cairo.h" #include "librsvg/librsvg-features.h" #elif !LIBRSVG_CHECK_VERSION(2,36,2) #include "librsvg/rsvg-cairo.h" #include "librsvg/librsvg-features.h" #endif #endif /* Define declarations. */ #define DefaultSVGDensity 96.0 /* Typedef declarations. 
*/ typedef struct _BoundingBox { double x, y, width, height; } BoundingBox; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _SVGInfo { FILE *file; ExceptionInfo *exception; Image *image; const ImageInfo *image_info; AffineMatrix affine; size_t width, height; char *size, *title, *comment; int n; double *scale, pointsize; ElementInfo element; SegmentInfo segment; BoundingBox bounds, text_offset, view_box; PointInfo radius; char *stop_color, *offset, *text, *vertices, *url; #if defined(MAGICKCORE_XML_DELEGATE) xmlParserCtxtPtr parser; xmlDocPtr document; #endif ssize_t svgDepth; } SVGInfo; /* Static declarations. */ static char SVGDensityGeometry[] = "96.0x96.0"; /* Forward declarations. */ static MagickBooleanType WriteSVGImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s S V G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsSVG()() returns MagickTrue if the image format type, identified by the % magick string, is SVG. % % The format of the IsSVG method is: % % MagickBooleanType IsSVG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
% */ static MagickBooleanType IsSVG(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick+1,"svg",3) == 0) return(MagickTrue); if (length < 5) return(MagickFalse); if (LocaleNCompare((const char *) magick+1,"?xml",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d S V G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadSVGImage() reads a Scalable Vector Gaphics file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadSVGImage method is: % % Image *ReadSVGImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *RenderSVGImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { char background[MagickPathExtent], command[MagickPathExtent], *density, input_filename[MagickPathExtent], opacity[MagickPathExtent], output_filename[MagickPathExtent], unique[MagickPathExtent]; const DelegateInfo *delegate_info; Image *next; int status; struct stat attributes; /* Our best hope for compliance with the SVG standard. 
*/ delegate_info=GetDelegateInfo("svg:decode",(char *) NULL,exception); if (delegate_info == (const DelegateInfo *) NULL) return((Image *) NULL); status=AcquireUniqueSymbolicLink(image->filename,input_filename); (void) AcquireUniqueFilename(output_filename); (void) AcquireUniqueFilename(unique); density=AcquireString(""); (void) FormatLocaleString(density,MagickPathExtent,"%.20g,%.20g", image->resolution.x,image->resolution.y); (void) FormatLocaleString(background,MagickPathExtent, "rgb(%.20g%%,%.20g%%,%.20g%%)", 100.0*QuantumScale*image->background_color.red, 100.0*QuantumScale*image->background_color.green, 100.0*QuantumScale*image->background_color.blue); (void) FormatLocaleString(opacity,MagickPathExtent,"%.20g",QuantumScale* image->background_color.alpha); (void) FormatLocaleString(command,MagickPathExtent, GetDelegateCommands(delegate_info),input_filename,output_filename,density, background,opacity,unique); density=DestroyString(density); status=ExternalDelegateCommand(MagickFalse,image_info->verbose,command, (char *) NULL,exception); (void) RelinquishUniqueFileResource(unique); (void) RelinquishUniqueFileResource(input_filename); if ((status == 0) && (stat(output_filename,&attributes) == 0) && (attributes.st_size > 0)) { Image *svg_image; ImageInfo *read_info; read_info=CloneImageInfo(image_info); (void) CopyMagickString(read_info->filename,output_filename, MagickPathExtent); svg_image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (svg_image != (Image *) NULL) { (void) RelinquishUniqueFileResource(output_filename); for (next=GetFirstImageInList(svg_image); next != (Image *) NULL; ) { (void) CopyMagickString(next->filename,image->filename, MaxTextExtent); (void) CopyMagickString(next->magick,image->magick,MaxTextExtent); next=GetNextImageInList(next); } return(svg_image); } } (void) RelinquishUniqueFileResource(output_filename); return((Image *) NULL); } #if defined(MAGICKCORE_XML_DELEGATE) static SVGInfo *AcquireSVGInfo(void) { 
SVGInfo *svg_info; svg_info=(SVGInfo *) AcquireMagickMemory(sizeof(*svg_info)); if (svg_info == (SVGInfo *) NULL) return((SVGInfo *) NULL); (void) memset(svg_info,0,sizeof(*svg_info)); svg_info->text=AcquireString(""); svg_info->scale=(double *) AcquireCriticalMemory(sizeof(*svg_info->scale)); GetAffineMatrix(&svg_info->affine); svg_info->scale[0]=ExpandAffine(&svg_info->affine); return(svg_info); } static SVGInfo *DestroySVGInfo(SVGInfo *svg_info) { if (svg_info->text != (char *) NULL) svg_info->text=DestroyString(svg_info->text); if (svg_info->scale != (double *) NULL) svg_info->scale=(double *) RelinquishMagickMemory(svg_info->scale); if (svg_info->title != (char *) NULL) svg_info->title=DestroyString(svg_info->title); if (svg_info->comment != (char *) NULL) svg_info->comment=DestroyString(svg_info->comment); return((SVGInfo *) RelinquishMagickMemory(svg_info)); } static double GetUserSpaceCoordinateValue(const SVGInfo *svg_info,int type, const char *string) { char *next_token, token[MagickPathExtent]; const char *p; double value; (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",string); assert(string != (const char *) NULL); p=(const char *) string; (void) GetNextToken(p,&p,MagickPathExtent,token); value=StringToDouble(token,&next_token); if (strchr(token,'%') != (char *) NULL) { double alpha, beta; if (type > 0) { if (svg_info->view_box.width == 0.0) return(0.0); return(svg_info->view_box.width*value/100.0); } if (type < 0) { if (svg_info->view_box.height == 0.0) return(0.0); return(svg_info->view_box.height*value/100.0); } alpha=value-svg_info->view_box.width; beta=value-svg_info->view_box.height; return(hypot(alpha,beta)/sqrt(2.0)/100.0); } (void) GetNextToken(p,&p,MagickPathExtent,token); if (LocaleNCompare(token,"cm",2) == 0) return(DefaultSVGDensity*svg_info->scale[0]/2.54*value); if (LocaleNCompare(token,"em",2) == 0) return(svg_info->pointsize*value); if (LocaleNCompare(token,"ex",2) == 0) return(svg_info->pointsize*value/2.0); if 
(LocaleNCompare(token,"in",2) == 0) return(DefaultSVGDensity*svg_info->scale[0]*value); if (LocaleNCompare(token,"mm",2) == 0) return(DefaultSVGDensity*svg_info->scale[0]/25.4*value); if (LocaleNCompare(token,"pc",2) == 0) return(DefaultSVGDensity*svg_info->scale[0]/6.0*value); if (LocaleNCompare(token,"pt",2) == 0) return(svg_info->scale[0]*value); if (LocaleNCompare(token,"px",2) == 0) return(value); return(value); } #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int SVGIsStandalone(void *context) { SVGInfo *svg_info; /* Is this document tagged standalone? */ (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.SVGIsStandalone()"); svg_info=(SVGInfo *) context; return(svg_info->document->standalone == 1); } static int SVGHasInternalSubset(void *context) { SVGInfo *svg_info; /* Does this document has an internal subset? */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " SAX.SVGHasInternalSubset()"); svg_info=(SVGInfo *) context; return(svg_info->document->intSubset != NULL); } static int SVGHasExternalSubset(void *context) { SVGInfo *svg_info; /* Does this document has an external subset? */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " SAX.SVGHasExternalSubset()"); svg_info=(SVGInfo *) context; return(svg_info->document->extSubset != NULL); } static void SVGInternalSubset(void *context,const xmlChar *name, const xmlChar *external_id,const xmlChar *system_id) { SVGInfo *svg_info; /* Does this document has an internal subset? */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " SAX.internalSubset(%s, %s, %s)",(const char *) name, (external_id != (const xmlChar *) NULL ? (const char *) external_id : "none"), (system_id != (const xmlChar *) NULL ? 
(const char *) system_id : "none")); svg_info=(SVGInfo *) context; (void) xmlCreateIntSubset(svg_info->document,name,external_id,system_id); } static xmlParserInputPtr SVGResolveEntity(void *context, const xmlChar *public_id,const xmlChar *system_id) { SVGInfo *svg_info; xmlParserInputPtr stream; /* Special entity resolver, better left to the parser, it has more context than the application layer. The default behaviour is to not resolve the entities, in that case the ENTITY_REF nodes are built in the structure (and the parameter values). */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " SAX.resolveEntity(%s, %s)", (public_id != (const xmlChar *) NULL ? (const char *) public_id : "none"), (system_id != (const xmlChar *) NULL ? (const char *) system_id : "none")); svg_info=(SVGInfo *) context; stream=xmlLoadExternalEntity((const char *) system_id,(const char *) public_id,svg_info->parser); return(stream); } static xmlEntityPtr SVGGetEntity(void *context,const xmlChar *name) { SVGInfo *svg_info; /* Get an entity by name. */ (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.SVGGetEntity(%s)", name); svg_info=(SVGInfo *) context; return(xmlGetDocEntity(svg_info->document,name)); } static xmlEntityPtr SVGGetParameterEntity(void *context,const xmlChar *name) { SVGInfo *svg_info; /* Get a parameter entity by name. */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " SAX.getParameterEntity(%s)",name); svg_info=(SVGInfo *) context; return(xmlGetParameterEntity(svg_info->document,name)); } static void SVGEntityDeclaration(void *context,const xmlChar *name,int type, const xmlChar *public_id,const xmlChar *system_id,xmlChar *content) { SVGInfo *svg_info; /* An entity definition has been parsed. */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " SAX.entityDecl(%s, %d, %s, %s, %s)",name,type, public_id != (xmlChar *) NULL ? (const char *) public_id : "none", system_id != (xmlChar *) NULL ? 
/* Register the entity in the internal (inSubset == 1) or external
   (inSubset == 2) DTD subset of the document. */
    (const char *) system_id : "none",content);
  svg_info=(SVGInfo *) context;
  if (svg_info->parser->inSubset == 1)
    (void) xmlAddDocEntity(svg_info->document,name,type,public_id,system_id,
      content);
  else
    if (svg_info->parser->inSubset == 2)
      (void) xmlAddDtdEntity(svg_info->document,name,type,public_id,system_id,
        content);
}

/*
  SAX callback: an attribute declaration was parsed; split any namespace
  prefix off the attribute name and register the declaration in the proper
  DTD subset.
*/
static void SVGAttributeDeclaration(void *context,const xmlChar *element,
  const xmlChar *name,int type,int value,const xmlChar *default_value,
  xmlEnumerationPtr tree)
{
  SVGInfo
    *svg_info;

  xmlChar
    *fullname,
    *prefix;

  xmlParserCtxtPtr
    parser;

  /*
    An attribute definition has been parsed.
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " SAX.attributeDecl(%s, %s, %d, %d, %s, ...)",element,name,type,value,
    default_value);
  svg_info=(SVGInfo *) context;
  fullname=(xmlChar *) NULL;
  prefix=(xmlChar *) NULL;
  parser=svg_info->parser;
  /* xmlSplitQName() may allocate fullname and prefix; both freed below. */
  fullname=(xmlChar *) xmlSplitQName(parser,name,&prefix);
  if (parser->inSubset == 1)
    (void) xmlAddAttributeDecl(&parser->vctxt,svg_info->document->intSubset,
      element,fullname,prefix,(xmlAttributeType) type,
      (xmlAttributeDefault) value,default_value,tree);
  else
    if (parser->inSubset == 2)
      (void) xmlAddAttributeDecl(&parser->vctxt,svg_info->document->extSubset,
        element,fullname,prefix,(xmlAttributeType) type,
        (xmlAttributeDefault) value,default_value,tree);
  if (prefix != (xmlChar *) NULL)
    xmlFree(prefix);
  if (fullname != (xmlChar *) NULL)
    xmlFree(fullname);
}

/*
  SAX callback: an element declaration was parsed; register it in the proper
  DTD subset.
*/
static void SVGElementDeclaration(void *context,const xmlChar *name,int type,
  xmlElementContentPtr content)
{
  SVGInfo
    *svg_info;

  xmlParserCtxtPtr
    parser;

  /*
    An element definition has been parsed.
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " SAX.elementDecl(%s, %d, ...)",name,type);
  svg_info=(SVGInfo *) context;
  parser=svg_info->parser;
  if (parser->inSubset == 1)
    (void) xmlAddElementDecl(&parser->vctxt,svg_info->document->intSubset,
      name,(xmlElementTypeVal) type,content);
  else
    if (parser->inSubset == 2)
      (void) xmlAddElementDecl(&parser->vctxt,svg_info->document->extSubset,
        name,(xmlElementTypeVal) type,content);
}

/*
  In-place cleanup of a style/token string: strips C-style comments,
  optionally trims surrounding whitespace and one leading/trailing quote
  character, and converts newlines to spaces.
*/
static void SVGStripString(const MagickBooleanType trim,char *message)
{
  register char
    *p,
    *q;

  size_t
    length;

  assert(message != (char *) NULL);
  if (*message == '\0')
    return;
  /*
    Remove comment.
  */
  q=message;
  for (p=message; *p != '\0'; p++)
  {
    if ((*p == '/') && (*(p+1) == '*'))
      {
        /* Skip to the end of the comment, or the end of the string. */
        for ( ; *p != '\0'; p++)
          if ((*p == '*') && (*(p+1) == '/'))
            {
              p+=2;
              break;
            }
        if (*p == '\0')
          break;
      }
    *q++=(*p);
  }
  *q='\0';
  length=strlen(message);
  if ((trim != MagickFalse) && (length != 0))
    {
      /*
        Remove whitespace.
      */
      p=message;
      while (isspace((int) ((unsigned char) *p)) != 0)
        p++;
      if ((*p == '\'') || (*p == '"'))
        p++;
      q=message+length-1;
      while ((isspace((int) ((unsigned char) *q)) != 0) && (q > p))
        q--;
      if (q > p)
        if ((*q == '\'') || (*q == '"'))
          q--;
      (void) memmove(message,p,(size_t) (q-p+1));
      message[q-p+1]='\0';
    }
  /*
    Convert newlines to a space.
  */
  for (p=message; *p != '\0'; p++)
    if (*p == '\n')
      *p=' ';
}

/*
  Splits `text' into a NULL-terminated array of alternating key/value
  strings, using key_sentinel and value_sentinel as separators.  Returns
  NULL on allocation failure; the caller owns the array and every string in
  it.
*/
static char **SVGKeyValuePairs(void *context,const int key_sentinel,
  const int value_sentinel,const char *text,size_t *number_tokens)
{
  char
    **tokens;

  register const char
    *p,
    *q;

  register ssize_t
    i;

  size_t
    extent;

  SVGInfo
    *svg_info;

  svg_info=(SVGInfo *) context;
  *number_tokens=0;
  if (text == (const char *) NULL)
    return((char **) NULL);
  extent=8;
  /* +2 leaves room for the trailing token and the NULL terminator. */
  tokens=(char **) AcquireQuantumMemory(extent+2UL,sizeof(*tokens));
  if (tokens == (char **) NULL)
    {
      (void) ThrowMagickException(svg_info->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",text);
      return((char **) NULL);
    }
  /*
    Convert string to an ASCII list.
*/ i=0; p=text; for (q=p; *q != '\0'; q++) { if ((*q != key_sentinel) && (*q != value_sentinel) && (*q != '\0')) continue; if (i == (ssize_t) extent) { extent<<=1; tokens=(char **) ResizeQuantumMemory(tokens,extent+2,sizeof(*tokens)); if (tokens == (char **) NULL) { (void) ThrowMagickException(svg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",text); return((char **) NULL); } } tokens[i]=AcquireString(p); (void) CopyMagickString(tokens[i],p,(size_t) (q-p+1)); SVGStripString(MagickTrue,tokens[i]); i++; p=q+1; } tokens[i]=AcquireString(p); (void) CopyMagickString(tokens[i],p,(size_t) (q-p+1)); SVGStripString(MagickTrue,tokens[i++]); tokens[i]=(char *) NULL; *number_tokens=(size_t) i; return(tokens); } static void SVGNotationDeclaration(void *context,const xmlChar *name, const xmlChar *public_id,const xmlChar *system_id) { SVGInfo *svg_info; xmlParserCtxtPtr parser; /* What to do when a notation declaration has been parsed. */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " SAX.notationDecl(%s, %s, %s)",name, public_id != (const xmlChar *) NULL ? (const char *) public_id : "none", system_id != (const xmlChar *) NULL ? 
(const char *) system_id : "none"); svg_info=(SVGInfo *) context; parser=svg_info->parser; if (parser->inSubset == 1) (void) xmlAddNotationDecl(&parser->vctxt,svg_info->document->intSubset, name,public_id,system_id); else if (parser->inSubset == 2) (void) xmlAddNotationDecl(&parser->vctxt,svg_info->document->intSubset, name,public_id,system_id); } static void SVGProcessStyleElement(void *context,const xmlChar *name, const char *style) { char background[MagickPathExtent], *color, *keyword, *units, *value; char **tokens; register ssize_t i; size_t number_tokens; SVGInfo *svg_info; (void) LogMagickEvent(CoderEvent,GetMagickModule()," "); svg_info=(SVGInfo *) context; tokens=SVGKeyValuePairs(context,':',';',style,&number_tokens); if (tokens == (char **) NULL) return; for (i=0; i < (ssize_t) (number_tokens-1); i+=2) { keyword=(char *) tokens[i]; value=(char *) tokens[i+1]; if (LocaleCompare(keyword,"font-size") != 0) continue; svg_info->pointsize=GetUserSpaceCoordinateValue(svg_info,0,value); (void) FormatLocaleFile(svg_info->file,"font-size %g\n", svg_info->pointsize); } color=AcquireString("none"); units=AcquireString("userSpaceOnUse"); for (i=0; i < (ssize_t) (number_tokens-1); i+=2) { keyword=(char *) tokens[i]; value=(char *) tokens[i+1]; (void) LogMagickEvent(CoderEvent,GetMagickModule()," %s: %s",keyword, value); switch (*keyword) { case 'B': case 'b': { if (LocaleCompare((const char *) name,"background") == 0) { if (LocaleCompare((const char *) name,"svg") == 0) (void) CopyMagickString(background,value,MagickPathExtent); break; } break; } case 'C': case 'c': { if (LocaleCompare(keyword,"clip-path") == 0) { (void) FormatLocaleFile(svg_info->file,"clip-path \"%s\"\n",value); break; } if (LocaleCompare(keyword,"clip-rule") == 0) { (void) FormatLocaleFile(svg_info->file,"clip-rule \"%s\"\n",value); break; } if (LocaleCompare(keyword,"clipPathUnits") == 0) { (void) CloneString(&units,value); (void) FormatLocaleFile(svg_info->file,"clip-units \"%s\"\n", value); break; 
} if (LocaleCompare(keyword,"color") == 0) { (void) CloneString(&color,value); break; } break; } case 'F': case 'f': { if (LocaleCompare(keyword,"fill") == 0) { if (LocaleCompare(value,"currentColor") == 0) { (void) FormatLocaleFile(svg_info->file,"fill \"%s\"\n",color); break; } if (LocaleCompare(value,"#000000ff") == 0) (void) FormatLocaleFile(svg_info->file,"fill '#000000'\n"); else (void) FormatLocaleFile(svg_info->file,"fill \"%s\"\n",value); break; } if (LocaleCompare(keyword,"fillcolor") == 0) { (void) FormatLocaleFile(svg_info->file,"fill \"%s\"\n",value); break; } if (LocaleCompare(keyword,"fill-rule") == 0) { (void) FormatLocaleFile(svg_info->file,"fill-rule \"%s\"\n",value); break; } if (LocaleCompare(keyword,"fill-opacity") == 0) { (void) FormatLocaleFile(svg_info->file,"fill-opacity \"%s\"\n", value); break; } if (LocaleCompare(keyword,"font") == 0) { char family[MagickPathExtent], size[MagickPathExtent], style[MagickPathExtent]; if (sscanf(value,"%2048s %2048s %2048s",style,size,family) != 3) break; if (GetUserSpaceCoordinateValue(svg_info,0,style) == 0) (void) FormatLocaleFile(svg_info->file,"font-style \"%s\"\n", style); else if (sscanf(value,"%2048s %2048s",size,family) != 2) break; (void) FormatLocaleFile(svg_info->file,"font-size \"%s\"\n",size); (void) FormatLocaleFile(svg_info->file,"font-family \"%s\"\n", family); break; } if (LocaleCompare(keyword,"font-family") == 0) { (void) FormatLocaleFile(svg_info->file,"font-family \"%s\"\n", value); break; } if (LocaleCompare(keyword,"font-stretch") == 0) { (void) FormatLocaleFile(svg_info->file,"font-stretch \"%s\"\n", value); break; } if (LocaleCompare(keyword,"font-style") == 0) { (void) FormatLocaleFile(svg_info->file,"font-style \"%s\"\n",value); break; } if (LocaleCompare(keyword,"font-size") == 0) { svg_info->pointsize=GetUserSpaceCoordinateValue(svg_info,0,value); (void) FormatLocaleFile(svg_info->file,"font-size %g\n", svg_info->pointsize); break; } if (LocaleCompare(keyword,"font-weight") == 
0) { (void) FormatLocaleFile(svg_info->file,"font-weight \"%s\"\n", value); break; } break; } case 'K': case 'k': { if (LocaleCompare(keyword,"kerning") == 0) { (void) FormatLocaleFile(svg_info->file,"kerning \"%s\"\n",value); break; } break; } case 'L': case 'l': { if (LocaleCompare(keyword,"letter-spacing") == 0) { (void) FormatLocaleFile(svg_info->file,"letter-spacing \"%s\"\n", value); break; } break; } case 'M': case 'm': { if (LocaleCompare(keyword,"mask") == 0) { (void) FormatLocaleFile(svg_info->file,"mask \"%s\"\n",value); break; } break; } case 'O': case 'o': { if (LocaleCompare(keyword,"offset") == 0) { (void) FormatLocaleFile(svg_info->file,"offset %g\n", GetUserSpaceCoordinateValue(svg_info,1,value)); break; } if (LocaleCompare(keyword,"opacity") == 0) { (void) FormatLocaleFile(svg_info->file,"opacity \"%s\"\n",value); break; } break; } case 'S': case 's': { if (LocaleCompare(keyword,"stop-color") == 0) { (void) CloneString(&svg_info->stop_color,value); break; } if (LocaleCompare(keyword,"stroke") == 0) { if (LocaleCompare(value,"currentColor") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke \"%s\"\n",color); break; } if (LocaleCompare(value,"#000000ff") == 0) (void) FormatLocaleFile(svg_info->file,"fill '#000000'\n"); else (void) FormatLocaleFile(svg_info->file, "stroke \"%s\"\n",value); break; } if (LocaleCompare(keyword,"stroke-antialiasing") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-antialias %d\n", LocaleCompare(value,"true") == 0); break; } if (LocaleCompare(keyword,"stroke-dasharray") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-dasharray %s\n", value); break; } if (LocaleCompare(keyword,"stroke-dashoffset") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-dashoffset %g\n", GetUserSpaceCoordinateValue(svg_info,1,value)); break; } if (LocaleCompare(keyword,"stroke-linecap") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-linecap \"%s\"\n", value); break; } if (LocaleCompare(keyword,"stroke-linejoin") == 
0) { (void) FormatLocaleFile(svg_info->file,"stroke-linejoin \"%s\"\n", value); break; } if (LocaleCompare(keyword,"stroke-miterlimit") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-miterlimit \"%s\"\n", value); break; } if (LocaleCompare(keyword,"stroke-opacity") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-opacity \"%s\"\n", value); break; } if (LocaleCompare(keyword,"stroke-width") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-width %g\n", GetUserSpaceCoordinateValue(svg_info,1,value)); break; } break; } case 't': case 'T': { if (LocaleCompare(keyword,"text-align") == 0) { (void) FormatLocaleFile(svg_info->file,"text-align \"%s\"\n",value); break; } if (LocaleCompare(keyword,"text-anchor") == 0) { (void) FormatLocaleFile(svg_info->file,"text-anchor \"%s\"\n", value); break; } if (LocaleCompare(keyword,"text-decoration") == 0) { if (LocaleCompare(value,"underline") == 0) (void) FormatLocaleFile(svg_info->file,"decorate underline\n"); if (LocaleCompare(value,"line-through") == 0) (void) FormatLocaleFile(svg_info->file,"decorate line-through\n"); if (LocaleCompare(value,"overline") == 0) (void) FormatLocaleFile(svg_info->file,"decorate overline\n"); break; } if (LocaleCompare(keyword,"text-antialiasing") == 0) { (void) FormatLocaleFile(svg_info->file,"text-antialias %d\n", LocaleCompare(value,"true") == 0); break; } break; } default: break; } } if (units != (char *) NULL) units=DestroyString(units); if (color != (char *) NULL) color=DestroyString(color); for (i=0; tokens[i] != (char *) NULL; i++) tokens[i]=DestroyString(tokens[i]); tokens=(char **) RelinquishMagickMemory(tokens); } static void SVGUnparsedEntityDeclaration(void *context,const xmlChar *name, const xmlChar *public_id,const xmlChar *system_id,const xmlChar *notation) { SVGInfo *svg_info; /* What to do when an unparsed entity declaration is parsed. 
*/
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " SAX.unparsedEntityDecl(%s, %s, %s, %s)",name,
    public_id != (xmlChar *) NULL ? (const char *) public_id : "none",
    system_id != (xmlChar *) NULL ? (const char *) system_id : "none",
    notation);
  svg_info=(SVGInfo *) context;
  (void) xmlAddDocEntity(svg_info->document,name,
    XML_EXTERNAL_GENERAL_UNPARSED_ENTITY,public_id,system_id,notation);
}

/*
  SAX callback: receives the document locator at startup; intentionally
  unused beyond logging.
*/
static void SVGSetDocumentLocator(void *context,xmlSAXLocatorPtr location)
{
  SVGInfo
    *svg_info;

  /*
    Receive the document locator at startup, actually xmlDefaultSAXLocator.
  */
  (void) location;
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " SAX.setDocumentLocator()");
  svg_info=(SVGInfo *) context;
  (void) svg_info;
}

/*
  SAX callback: document parsing is starting; allocate the xmlDoc and copy
  the parser's encoding and standalone flag onto it.
*/
static void SVGStartDocument(void *context)
{
  SVGInfo
    *svg_info;

  xmlParserCtxtPtr
    parser;

  /*
    Called when the document start being processed.
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.startDocument()");
  svg_info=(SVGInfo *) context;
  parser=svg_info->parser;
  svg_info->document=xmlNewDoc(parser->version);
  if (svg_info->document == (xmlDocPtr) NULL)
    return;
  if (parser->encoding == NULL)
    svg_info->document->encoding=(const xmlChar *) NULL;
  else
    svg_info->document->encoding=xmlStrdup(parser->encoding);
  svg_info->document->standalone=parser->standalone;
}

/*
  SAX callback: document parsing finished; release all per-parse state held
  on svg_info (offset, stop_color, scale, text, vertices, url) and free the
  xmlDoc.
*/
static void SVGEndDocument(void *context)
{
  SVGInfo
    *svg_info;

  /*
    Called when the document end has been detected.
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.endDocument()");
  svg_info=(SVGInfo *) context;
  if (svg_info->offset != (char *) NULL)
    svg_info->offset=DestroyString(svg_info->offset);
  if (svg_info->stop_color != (char *) NULL)
    svg_info->stop_color=DestroyString(svg_info->stop_color);
  if (svg_info->scale != (double *) NULL)
    svg_info->scale=(double *) RelinquishMagickMemory(svg_info->scale);
  if (svg_info->text != (char *) NULL)
    svg_info->text=DestroyString(svg_info->text);
  if (svg_info->vertices != (char *) NULL)
    svg_info->vertices=DestroyString(svg_info->vertices);
  if (svg_info->url != (char *) NULL)
    svg_info->url=DestroyString(svg_info->url);
#if defined(MAGICKCORE_XML_DELEGATE)
  if (svg_info->document != (xmlDocPtr) NULL)
    {
      xmlFreeDoc(svg_info->document);
      svg_info->document=(xmlDocPtr) NULL;
    }
#endif
}

/*
  SAX callback: an opening tag was parsed; emits the corresponding MVG
  primitives.  NOTE(review): the body of this function continues beyond this
  chunk of the file.
*/
static void SVGStartElement(void *context,const xmlChar *name,
  const xmlChar **attributes)
{
#define PushGraphicContext(id) \
{ \
  if (*id == '\0') \
    (void) FormatLocaleFile(svg_info->file,"push graphic-context\n"); \
  else \
    (void) FormatLocaleFile(svg_info->file,"push graphic-context \"%s\"\n", \
      id); \
}

  char
    *color,
    background[MagickPathExtent],
    id[MagickPathExtent],
    *next_token,
    token[MagickPathExtent],
    **tokens,
    *units;

  const char
    *keyword,
    *p,
    *value;

  register ssize_t
    i,
    j;

  size_t
    number_tokens;

  SVGInfo
    *svg_info;

  /*
    Called when an opening tag has been processed.
*/ (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.startElement(%s", name); svg_info=(SVGInfo *) context; svg_info->n++; svg_info->scale=(double *) ResizeQuantumMemory(svg_info->scale, svg_info->n+1UL,sizeof(*svg_info->scale)); if (svg_info->scale == (double *) NULL) { (void) ThrowMagickException(svg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",name); return; } svg_info->scale[svg_info->n]=svg_info->scale[svg_info->n-1]; color=AcquireString("none"); units=AcquireString("userSpaceOnUse"); *id='\0'; *token='\0'; *background='\0'; value=(const char *) NULL; if ((LocaleCompare((char *) name,"image") == 0) || (LocaleCompare((char *) name,"pattern") == 0) || (LocaleCompare((char *) name,"rect") == 0) || (LocaleCompare((char *) name,"text") == 0) || (LocaleCompare((char *) name,"use") == 0)) { svg_info->bounds.x=0.0; svg_info->bounds.y=0.0; } if (attributes != (const xmlChar **) NULL) for (i=0; (attributes[i] != (const xmlChar *) NULL); i+=2) { keyword=(const char *) attributes[i]; value=(const char *) attributes[i+1]; switch (*keyword) { case 'C': case 'c': { if (LocaleCompare(keyword,"cx") == 0) { svg_info->element.cx= GetUserSpaceCoordinateValue(svg_info,1,value); break; } if (LocaleCompare(keyword,"cy") == 0) { svg_info->element.cy= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } break; } case 'F': case 'f': { if (LocaleCompare(keyword,"fx") == 0) { svg_info->element.major= GetUserSpaceCoordinateValue(svg_info,1,value); break; } if (LocaleCompare(keyword,"fy") == 0) { svg_info->element.minor= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } break; } case 'H': case 'h': { if (LocaleCompare(keyword,"height") == 0) { svg_info->bounds.height= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } break; } case 'I': case 'i': { if (LocaleCompare(keyword,"id") == 0) { (void) CopyMagickString(id,value,MagickPathExtent); break; } break; } case 'R': case 'r': { if (LocaleCompare(keyword,"r") == 0) { 
svg_info->element.angle= GetUserSpaceCoordinateValue(svg_info,0,value); break; } break; } case 'W': case 'w': { if (LocaleCompare(keyword,"width") == 0) { svg_info->bounds.width= GetUserSpaceCoordinateValue(svg_info,1,value); break; } break; } case 'X': case 'x': { if (LocaleCompare(keyword,"x") == 0) { svg_info->bounds.x=GetUserSpaceCoordinateValue(svg_info,1,value); break; } if (LocaleCompare(keyword,"x1") == 0) { svg_info->segment.x1=GetUserSpaceCoordinateValue(svg_info,1, value); break; } if (LocaleCompare(keyword,"x2") == 0) { svg_info->segment.x2=GetUserSpaceCoordinateValue(svg_info,1, value); break; } break; } case 'Y': case 'y': { if (LocaleCompare(keyword,"y") == 0) { svg_info->bounds.y=GetUserSpaceCoordinateValue(svg_info,-1,value); break; } if (LocaleCompare(keyword,"y1") == 0) { svg_info->segment.y1=GetUserSpaceCoordinateValue(svg_info,-1, value); break; } if (LocaleCompare(keyword,"y2") == 0) { svg_info->segment.y2=GetUserSpaceCoordinateValue(svg_info,-1, value); break; } break; } default: break; } } if (strchr((char *) name,':') != (char *) NULL) { /* Skip over namespace. 
*/ for ( ; *name != ':'; name++) ; name++; } switch (*name) { case 'C': case 'c': { if (LocaleCompare((const char *) name,"circle") == 0) { PushGraphicContext(id); break; } if (LocaleCompare((const char *) name,"clipPath") == 0) { (void) FormatLocaleFile(svg_info->file,"push clip-path \"%s\"\n",id); break; } break; } case 'D': case 'd': { if (LocaleCompare((const char *) name,"defs") == 0) { (void) FormatLocaleFile(svg_info->file,"push defs\n"); break; } break; } case 'E': case 'e': { if (LocaleCompare((const char *) name,"ellipse") == 0) { PushGraphicContext(id); break; } break; } case 'F': case 'f': { if (LocaleCompare((const char *) name,"foreignObject") == 0) { PushGraphicContext(id); break; } break; } case 'G': case 'g': { if (LocaleCompare((const char *) name,"g") == 0) { PushGraphicContext(id); break; } break; } case 'I': case 'i': { if (LocaleCompare((const char *) name,"image") == 0) { PushGraphicContext(id); break; } break; } case 'L': case 'l': { if (LocaleCompare((const char *) name,"line") == 0) { PushGraphicContext(id); break; } if (LocaleCompare((const char *) name,"linearGradient") == 0) { (void) FormatLocaleFile(svg_info->file, "push gradient \"%s\" linear %g,%g %g,%g\n",id, svg_info->segment.x1,svg_info->segment.y1,svg_info->segment.x2, svg_info->segment.y2); break; } break; } case 'M': case 'm': { if (LocaleCompare((const char *) name,"mask") == 0) { (void) FormatLocaleFile(svg_info->file,"push mask \"%s\"\n",id); break; } break; } case 'P': case 'p': { if (LocaleCompare((const char *) name,"path") == 0) { PushGraphicContext(id); break; } if (LocaleCompare((const char *) name,"pattern") == 0) { (void) FormatLocaleFile(svg_info->file, "push pattern \"%s\" %g,%g %g,%g\n",id, svg_info->bounds.x,svg_info->bounds.y,svg_info->bounds.width, svg_info->bounds.height); break; } if (LocaleCompare((const char *) name,"polygon") == 0) { PushGraphicContext(id); break; } if (LocaleCompare((const char *) name,"polyline") == 0) { PushGraphicContext(id); break; } 
break; } case 'R': case 'r': { if (LocaleCompare((const char *) name,"radialGradient") == 0) { (void) FormatLocaleFile(svg_info->file, "push gradient \"%s\" radial %g,%g %g,%g %g\n", id,svg_info->element.cx,svg_info->element.cy, svg_info->element.major,svg_info->element.minor, svg_info->element.angle); break; } if (LocaleCompare((const char *) name,"rect") == 0) { PushGraphicContext(id); break; } break; } case 'S': case 's': { if (LocaleCompare((char *) name,"style") == 0) break; if (LocaleCompare((const char *) name,"svg") == 0) { svg_info->svgDepth++; PushGraphicContext(id); (void) FormatLocaleFile(svg_info->file,"compliance \"SVG\"\n"); (void) FormatLocaleFile(svg_info->file,"fill \"black\"\n"); (void) FormatLocaleFile(svg_info->file,"fill-opacity 1\n"); (void) FormatLocaleFile(svg_info->file,"stroke \"none\"\n"); (void) FormatLocaleFile(svg_info->file,"stroke-width 1\n"); (void) FormatLocaleFile(svg_info->file,"stroke-opacity 1\n"); (void) FormatLocaleFile(svg_info->file,"fill-rule nonzero\n"); break; } if (LocaleCompare((const char *) name,"symbol") == 0) { (void) FormatLocaleFile(svg_info->file,"push symbol\n"); break; } break; } case 'T': case 't': { if (LocaleCompare((const char *) name,"text") == 0) { PushGraphicContext(id); (void) FormatLocaleFile(svg_info->file,"class \"text\"\n"); svg_info->text_offset.x=svg_info->bounds.x; svg_info->text_offset.y=svg_info->bounds.y; svg_info->bounds.x=0.0; svg_info->bounds.y=0.0; svg_info->bounds.width=0.0; svg_info->bounds.height=0.0; break; } if (LocaleCompare((const char *) name,"tspan") == 0) { if (*svg_info->text != '\0') { char *text; text=EscapeString(svg_info->text,'\"'); (void) FormatLocaleFile(svg_info->file,"text %g,%g \"%s\"\n", svg_info->text_offset.x,svg_info->text_offset.y,text); text=DestroyString(text); *svg_info->text='\0'; } PushGraphicContext(id); break; } break; } case 'U': case 'u': { if (LocaleCompare((char *) name,"use") == 0) { PushGraphicContext(id); break; } break; } default: break; } if 
(attributes != (const xmlChar **) NULL) for (i=0; (attributes[i] != (const xmlChar *) NULL); i+=2) { keyword=(const char *) attributes[i]; value=(const char *) attributes[i+1]; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %s = %s",keyword,value); switch (*keyword) { case 'A': case 'a': { if (LocaleCompare(keyword,"angle") == 0) { (void) FormatLocaleFile(svg_info->file,"angle %g\n", GetUserSpaceCoordinateValue(svg_info,0,value)); break; } break; } case 'C': case 'c': { if (LocaleCompare(keyword,"class") == 0) { const char *p; for (p=value; ; ) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token != '\0') { (void) FormatLocaleFile(svg_info->file,"class \"%s\"\n", value); break; } } break; } if (LocaleCompare(keyword,"clip-path") == 0) { (void) FormatLocaleFile(svg_info->file,"clip-path \"%s\"\n", value); break; } if (LocaleCompare(keyword,"clip-rule") == 0) { (void) FormatLocaleFile(svg_info->file,"clip-rule \"%s\"\n", value); break; } if (LocaleCompare(keyword,"clipPathUnits") == 0) { (void) CloneString(&units,value); (void) FormatLocaleFile(svg_info->file,"clip-units \"%s\"\n", value); break; } if (LocaleCompare(keyword,"color") == 0) { (void) CloneString(&color,value); break; } if (LocaleCompare(keyword,"cx") == 0) { svg_info->element.cx= GetUserSpaceCoordinateValue(svg_info,1,value); break; } if (LocaleCompare(keyword,"cy") == 0) { svg_info->element.cy= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } break; } case 'D': case 'd': { if (LocaleCompare(keyword,"d") == 0) { (void) CloneString(&svg_info->vertices,value); break; } if (LocaleCompare(keyword,"dx") == 0) { double dx; dx=GetUserSpaceCoordinateValue(svg_info,1,value); svg_info->bounds.x+=dx; svg_info->text_offset.x+=dx; if (LocaleCompare((char *) name,"text") == 0) (void) FormatLocaleFile(svg_info->file,"translate %g,0.0\n",dx); break; } if (LocaleCompare(keyword,"dy") == 0) { double dy; 
dy=GetUserSpaceCoordinateValue(svg_info,-1,value); svg_info->bounds.y+=dy; svg_info->text_offset.y+=dy; if (LocaleCompare((char *) name,"text") == 0) (void) FormatLocaleFile(svg_info->file,"translate 0.0,%g\n",dy); break; } break; } case 'F': case 'f': { if (LocaleCompare(keyword,"fill") == 0) { if (LocaleCompare(value,"currentColor") == 0) { (void) FormatLocaleFile(svg_info->file,"fill \"%s\"\n",color); break; } (void) FormatLocaleFile(svg_info->file,"fill \"%s\"\n",value); break; } if (LocaleCompare(keyword,"fillcolor") == 0) { (void) FormatLocaleFile(svg_info->file,"fill \"%s\"\n",value); break; } if (LocaleCompare(keyword,"fill-rule") == 0) { (void) FormatLocaleFile(svg_info->file,"fill-rule \"%s\"\n", value); break; } if (LocaleCompare(keyword,"fill-opacity") == 0) { (void) FormatLocaleFile(svg_info->file,"fill-opacity \"%s\"\n", value); break; } if (LocaleCompare(keyword,"font-family") == 0) { (void) FormatLocaleFile(svg_info->file,"font-family \"%s\"\n", value); break; } if (LocaleCompare(keyword,"font-stretch") == 0) { (void) FormatLocaleFile(svg_info->file,"font-stretch \"%s\"\n", value); break; } if (LocaleCompare(keyword,"font-style") == 0) { (void) FormatLocaleFile(svg_info->file,"font-style \"%s\"\n", value); break; } if (LocaleCompare(keyword,"font-size") == 0) { if (LocaleCompare(value,"xx-small") == 0) svg_info->pointsize=6.144; else if (LocaleCompare(value,"x-small") == 0) svg_info->pointsize=7.68; else if (LocaleCompare(value,"small") == 0) svg_info->pointsize=9.6; else if (LocaleCompare(value,"medium") == 0) svg_info->pointsize=12.0; else if (LocaleCompare(value,"large") == 0) svg_info->pointsize=14.4; else if (LocaleCompare(value,"x-large") == 0) svg_info->pointsize=17.28; else if (LocaleCompare(value,"xx-large") == 0) svg_info->pointsize=20.736; else svg_info->pointsize=GetUserSpaceCoordinateValue(svg_info,0, value); (void) FormatLocaleFile(svg_info->file,"font-size %g\n", svg_info->pointsize); break; } if (LocaleCompare(keyword,"font-weight") 
== 0) { (void) FormatLocaleFile(svg_info->file,"font-weight \"%s\"\n", value); break; } break; } case 'G': case 'g': { if (LocaleCompare(keyword,"gradientTransform") == 0) { AffineMatrix affine, current, transform; GetAffineMatrix(&transform); (void) LogMagickEvent(CoderEvent,GetMagickModule()," "); tokens=SVGKeyValuePairs(context,'(',')',value,&number_tokens); if (tokens == (char **) NULL) break; for (j=0; j < (ssize_t) (number_tokens-1); j+=2) { keyword=(char *) tokens[j]; if (keyword == (char *) NULL) continue; value=(char *) tokens[j+1]; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %s: %s",keyword,value); current=transform; GetAffineMatrix(&affine); switch (*keyword) { case 'M': case 'm': { if (LocaleCompare(keyword,"matrix") == 0) { p=(const char *) value; (void) GetNextToken(p,&p,MagickPathExtent,token); affine.sx=StringToDouble(value,(char **) NULL); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); affine.rx=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); affine.ry=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); affine.sy=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); affine.tx=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); affine.ty=StringToDouble(token,&next_token); break; } break; } case 'R': case 'r': { if (LocaleCompare(keyword,"rotate") == 0) { double angle; angle=GetUserSpaceCoordinateValue(svg_info,0,value); affine.sx=cos(DegreesToRadians(fmod(angle,360.0))); affine.rx=sin(DegreesToRadians(fmod(angle,360.0))); 
affine.ry=(-sin(DegreesToRadians(fmod(angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod(angle,360.0))); break; } break; } case 'S': case 's': { if (LocaleCompare(keyword,"scale") == 0) { for (p=(const char *) value; *p != '\0'; p++) if ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) break; affine.sx=GetUserSpaceCoordinateValue(svg_info,1,value); affine.sy=affine.sx; if (*p != '\0') affine.sy= GetUserSpaceCoordinateValue(svg_info,-1,p+1); svg_info->scale[svg_info->n]=ExpandAffine(&affine); break; } if (LocaleCompare(keyword,"skewX") == 0) { affine.sx=svg_info->affine.sx; affine.ry=tan(DegreesToRadians(fmod( GetUserSpaceCoordinateValue(svg_info,1,value), 360.0))); affine.sy=svg_info->affine.sy; break; } if (LocaleCompare(keyword,"skewY") == 0) { affine.sx=svg_info->affine.sx; affine.rx=tan(DegreesToRadians(fmod( GetUserSpaceCoordinateValue(svg_info,-1,value), 360.0))); affine.sy=svg_info->affine.sy; break; } break; } case 'T': case 't': { if (LocaleCompare(keyword,"translate") == 0) { for (p=(const char *) value; *p != '\0'; p++) if ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) break; affine.tx=GetUserSpaceCoordinateValue(svg_info,1,value); affine.ty=affine.tx; if (*p != '\0') affine.ty= GetUserSpaceCoordinateValue(svg_info,-1,p+1); break; } break; } default: break; } transform.sx=affine.sx*current.sx+affine.ry*current.rx; transform.rx=affine.rx*current.sx+affine.sy*current.rx; transform.ry=affine.sx*current.ry+affine.ry*current.sy; transform.sy=affine.rx*current.ry+affine.sy*current.sy; transform.tx=affine.tx*current.sx+affine.ty*current.ry+ current.tx; transform.ty=affine.tx*current.rx+affine.ty*current.sy+ current.ty; } (void) FormatLocaleFile(svg_info->file, "affine %g %g %g %g %g %g\n",transform.sx, transform.rx,transform.ry,transform.sy,transform.tx, transform.ty); for (j=0; tokens[j] != (char *) NULL; j++) tokens[j]=DestroyString(tokens[j]); tokens=(char **) RelinquishMagickMemory(tokens); break; } if 
(LocaleCompare(keyword,"gradientUnits") == 0) { (void) CloneString(&units,value); (void) FormatLocaleFile(svg_info->file,"gradient-units \"%s\"\n", value); break; } break; } case 'H': case 'h': { if (LocaleCompare(keyword,"height") == 0) { svg_info->bounds.height= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } if (LocaleCompare(keyword,"href") == 0) { (void) CloneString(&svg_info->url,value); break; } break; } case 'K': case 'k': { if (LocaleCompare(keyword,"kerning") == 0) { (void) FormatLocaleFile(svg_info->file,"kerning \"%s\"\n", value); break; } break; } case 'L': case 'l': { if (LocaleCompare(keyword,"letter-spacing") == 0) { (void) FormatLocaleFile(svg_info->file,"letter-spacing \"%s\"\n", value); break; } break; } case 'M': case 'm': { if (LocaleCompare(keyword,"major") == 0) { svg_info->element.major= GetUserSpaceCoordinateValue(svg_info,1,value); break; } if (LocaleCompare(keyword,"mask") == 0) { (void) FormatLocaleFile(svg_info->file,"mask \"%s\"\n",value); break; } if (LocaleCompare(keyword,"minor") == 0) { svg_info->element.minor= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } break; } case 'O': case 'o': { if (LocaleCompare(keyword,"offset") == 0) { (void) CloneString(&svg_info->offset,value); break; } if (LocaleCompare(keyword,"opacity") == 0) { (void) FormatLocaleFile(svg_info->file,"opacity \"%s\"\n",value); break; } break; } case 'P': case 'p': { if (LocaleCompare(keyword,"path") == 0) { (void) CloneString(&svg_info->url,value); break; } if (LocaleCompare(keyword,"points") == 0) { (void) CloneString(&svg_info->vertices,value); break; } break; } case 'R': case 'r': { if (LocaleCompare(keyword,"r") == 0) { svg_info->element.major= GetUserSpaceCoordinateValue(svg_info,1,value); svg_info->element.minor= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } if (LocaleCompare(keyword,"rotate") == 0) { double angle; angle=GetUserSpaceCoordinateValue(svg_info,0,value); (void) FormatLocaleFile(svg_info->file,"translate %g,%g\n", 
svg_info->bounds.x,svg_info->bounds.y); svg_info->bounds.x=0; svg_info->bounds.y=0; (void) FormatLocaleFile(svg_info->file,"rotate %g\n",angle); break; } if (LocaleCompare(keyword,"rx") == 0) { if (LocaleCompare((const char *) name,"ellipse") == 0) svg_info->element.major= GetUserSpaceCoordinateValue(svg_info,1,value); else svg_info->radius.x= GetUserSpaceCoordinateValue(svg_info,1,value); break; } if (LocaleCompare(keyword,"ry") == 0) { if (LocaleCompare((const char *) name,"ellipse") == 0) svg_info->element.minor= GetUserSpaceCoordinateValue(svg_info,-1,value); else svg_info->radius.y= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } break; } case 'S': case 's': { if (LocaleCompare(keyword,"stop-color") == 0) { (void) CloneString(&svg_info->stop_color,value); break; } if (LocaleCompare(keyword,"stroke") == 0) { if (LocaleCompare(value,"currentColor") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke \"%s\"\n", color); break; } (void) FormatLocaleFile(svg_info->file,"stroke \"%s\"\n",value); break; } if (LocaleCompare(keyword,"stroke-antialiasing") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-antialias %d\n", LocaleCompare(value,"true") == 0); break; } if (LocaleCompare(keyword,"stroke-dasharray") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-dasharray %s\n", value); break; } if (LocaleCompare(keyword,"stroke-dashoffset") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-dashoffset %g\n", GetUserSpaceCoordinateValue(svg_info,1,value)); break; } if (LocaleCompare(keyword,"stroke-linecap") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-linecap \"%s\"\n", value); break; } if (LocaleCompare(keyword,"stroke-linejoin") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-linejoin \"%s\"\n", value); break; } if (LocaleCompare(keyword,"stroke-miterlimit") == 0) { (void) FormatLocaleFile(svg_info->file, "stroke-miterlimit \"%s\"\n",value); break; } if (LocaleCompare(keyword,"stroke-opacity") == 0) { (void) 
FormatLocaleFile(svg_info->file,"stroke-opacity \"%s\"\n", value); break; } if (LocaleCompare(keyword,"stroke-width") == 0) { (void) FormatLocaleFile(svg_info->file,"stroke-width %g\n", GetUserSpaceCoordinateValue(svg_info,1,value)); break; } if (LocaleCompare(keyword,"style") == 0) { SVGProcessStyleElement(context,name,value); break; } break; } case 'T': case 't': { if (LocaleCompare(keyword,"text-align") == 0) { (void) FormatLocaleFile(svg_info->file,"text-align \"%s\"\n", value); break; } if (LocaleCompare(keyword,"text-anchor") == 0) { (void) FormatLocaleFile(svg_info->file,"text-anchor \"%s\"\n", value); break; } if (LocaleCompare(keyword,"text-decoration") == 0) { if (LocaleCompare(value,"underline") == 0) (void) FormatLocaleFile(svg_info->file,"decorate underline\n"); if (LocaleCompare(value,"line-through") == 0) (void) FormatLocaleFile(svg_info->file, "decorate line-through\n"); if (LocaleCompare(value,"overline") == 0) (void) FormatLocaleFile(svg_info->file,"decorate overline\n"); break; } if (LocaleCompare(keyword,"text-antialiasing") == 0) { (void) FormatLocaleFile(svg_info->file,"text-antialias %d\n", LocaleCompare(value,"true") == 0); break; } if (LocaleCompare(keyword,"transform") == 0) { AffineMatrix affine, current, transform; GetAffineMatrix(&transform); (void) LogMagickEvent(CoderEvent,GetMagickModule()," "); tokens=SVGKeyValuePairs(context,'(',')',value,&number_tokens); if (tokens == (char **) NULL) break; for (j=0; j < (ssize_t) (number_tokens-1); j+=2) { keyword=(char *) tokens[j]; value=(char *) tokens[j+1]; (void) LogMagickEvent(CoderEvent,GetMagickModule(), " %s: %s",keyword,value); current=transform; GetAffineMatrix(&affine); switch (*keyword) { case 'M': case 'm': { if (LocaleCompare(keyword,"matrix") == 0) { p=(const char *) value; (void) GetNextToken(p,&p,MagickPathExtent,token); affine.sx=StringToDouble(value,(char **) NULL); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) 
GetNextToken(p,&p,MagickPathExtent,token); affine.rx=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); affine.ry=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); affine.sy=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); affine.tx=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); affine.ty=StringToDouble(token,&next_token); break; } break; } case 'R': case 'r': { if (LocaleCompare(keyword,"rotate") == 0) { double angle, x, y; p=(const char *) value; (void) GetNextToken(p,&p,MagickPathExtent,token); angle=StringToDouble(value,(char **) NULL); affine.sx=cos(DegreesToRadians(fmod(angle,360.0))); affine.rx=sin(DegreesToRadians(fmod(angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod(angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod(angle,360.0))); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); affine.tx=svg_info->bounds.x+x* cos(DegreesToRadians(fmod(angle,360.0)))+y* sin(DegreesToRadians(fmod(angle,360.0))); affine.ty=svg_info->bounds.y-x* sin(DegreesToRadians(fmod(angle,360.0)))+y* cos(DegreesToRadians(fmod(angle,360.0))); affine.tx-=x; affine.ty-=y; break; } break; } case 'S': case 's': { if (LocaleCompare(keyword,"scale") == 0) { for (p=(const char *) value; *p != '\0'; p++) if ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) break; 
affine.sx=GetUserSpaceCoordinateValue(svg_info,1,value); affine.sy=affine.sx; if (*p != '\0') affine.sy=GetUserSpaceCoordinateValue(svg_info,-1, p+1); svg_info->scale[svg_info->n]=ExpandAffine(&affine); break; } if (LocaleCompare(keyword,"skewX") == 0) { affine.sx=svg_info->affine.sx; affine.ry=tan(DegreesToRadians(fmod( GetUserSpaceCoordinateValue(svg_info,1,value), 360.0))); affine.sy=svg_info->affine.sy; break; } if (LocaleCompare(keyword,"skewY") == 0) { affine.sx=svg_info->affine.sx; affine.rx=tan(DegreesToRadians(fmod( GetUserSpaceCoordinateValue(svg_info,-1,value), 360.0))); affine.sy=svg_info->affine.sy; break; } break; } case 'T': case 't': { if (LocaleCompare(keyword,"translate") == 0) { for (p=(const char *) value; *p != '\0'; p++) if ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) break; affine.tx=GetUserSpaceCoordinateValue(svg_info,1,value); affine.ty=0; if (*p != '\0') affine.ty=GetUserSpaceCoordinateValue(svg_info,-1, p+1); break; } break; } default: break; } transform.sx=affine.sx*current.sx+affine.ry*current.rx; transform.rx=affine.rx*current.sx+affine.sy*current.rx; transform.ry=affine.sx*current.ry+affine.ry*current.sy; transform.sy=affine.rx*current.ry+affine.sy*current.sy; transform.tx=affine.tx*current.sx+affine.ty*current.ry+ current.tx; transform.ty=affine.tx*current.rx+affine.ty*current.sy+ current.ty; } (void) FormatLocaleFile(svg_info->file, "affine %g %g %g %g %g %g\n",transform.sx,transform.rx, transform.ry,transform.sy,transform.tx,transform.ty); for (j=0; tokens[j] != (char *) NULL; j++) tokens[j]=DestroyString(tokens[j]); tokens=(char **) RelinquishMagickMemory(tokens); break; } break; } case 'V': case 'v': { if (LocaleCompare(keyword,"verts") == 0) { (void) CloneString(&svg_info->vertices,value); break; } if (LocaleCompare(keyword,"viewBox") == 0) { p=(const char *) value; (void) GetNextToken(p,&p,MagickPathExtent,token); svg_info->view_box.x=StringToDouble(token,&next_token); (void) 
GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); svg_info->view_box.y=StringToDouble(token,&next_token); (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); svg_info->view_box.width=StringToDouble(token, (char **) NULL); if (svg_info->bounds.width == 0) svg_info->bounds.width=svg_info->view_box.width; (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); svg_info->view_box.height=StringToDouble(token, (char **) NULL); if (svg_info->bounds.height == 0) svg_info->bounds.height=svg_info->view_box.height; break; } break; } case 'W': case 'w': { if (LocaleCompare(keyword,"width") == 0) { svg_info->bounds.width= GetUserSpaceCoordinateValue(svg_info,1,value); break; } break; } case 'X': case 'x': { if (LocaleCompare(keyword,"x") == 0) { svg_info->bounds.x=GetUserSpaceCoordinateValue(svg_info,1,value); break; } if (LocaleCompare(keyword,"xlink:href") == 0) { (void) CloneString(&svg_info->url,value); break; } if (LocaleCompare(keyword,"x1") == 0) { svg_info->segment.x1= GetUserSpaceCoordinateValue(svg_info,1,value); break; } if (LocaleCompare(keyword,"x2") == 0) { svg_info->segment.x2= GetUserSpaceCoordinateValue(svg_info,1,value); break; } break; } case 'Y': case 'y': { if (LocaleCompare(keyword,"y") == 0) { svg_info->bounds.y=GetUserSpaceCoordinateValue(svg_info,-1,value); break; } if (LocaleCompare(keyword,"y1") == 0) { svg_info->segment.y1= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } if (LocaleCompare(keyword,"y2") == 0) { svg_info->segment.y2= GetUserSpaceCoordinateValue(svg_info,-1,value); break; } break; } default: break; } } if (LocaleCompare((const char *) name,"svg") == 0) { if (svg_info->document->encoding != (const xmlChar *) NULL) (void) FormatLocaleFile(svg_info->file,"encoding \"%s\"\n", (const char *) svg_info->document->encoding); if (attributes != 
(const xmlChar **) NULL) { double sx, sy, tx, ty; if ((svg_info->view_box.width == 0.0) || (svg_info->view_box.height == 0.0)) svg_info->view_box=svg_info->bounds; svg_info->width=0; if (svg_info->bounds.width > 0.0) svg_info->width=(size_t) floor(svg_info->bounds.width+0.5); svg_info->height=0; if (svg_info->bounds.height > 0.0) svg_info->height=(size_t) floor(svg_info->bounds.height+0.5); (void) FormatLocaleFile(svg_info->file,"viewbox 0 0 %.20g %.20g\n", (double) svg_info->width,(double) svg_info->height); sx=PerceptibleReciprocal(svg_info->view_box.width)*svg_info->width; sy=PerceptibleReciprocal(svg_info->view_box.height)*svg_info->height; tx=svg_info->view_box.x != 0.0 ? (double) -sx*svg_info->view_box.x : 0.0; ty=svg_info->view_box.y != 0.0 ? (double) -sy*svg_info->view_box.y : 0.0; (void) FormatLocaleFile(svg_info->file,"affine %g 0 0 %g %g %g\n", sx,sy,tx,ty); if ((svg_info->svgDepth == 1) && (*background != '\0')) { PushGraphicContext(id); (void) FormatLocaleFile(svg_info->file,"fill %s\n",background); (void) FormatLocaleFile(svg_info->file, "rectangle 0,0 %g,%g\n",svg_info->view_box.width, svg_info->view_box.height); (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n"); } } } (void) LogMagickEvent(CoderEvent,GetMagickModule()," )"); if (units != (char *) NULL) units=DestroyString(units); if (color != (char *) NULL) color=DestroyString(color); } static void SVGEndElement(void *context,const xmlChar *name) { SVGInfo *svg_info; /* Called when the end of an element has been detected. */ (void) LogMagickEvent(CoderEvent,GetMagickModule(), " SAX.endElement(%s)",name); svg_info=(SVGInfo *) context; if (strchr((char *) name,':') != (char *) NULL) { /* Skip over namespace. 
*/
      for ( ; *name != ':'; name++) ;
      name++;
    }
  /*
    Emit the MVG primitive(s) that close this element, dispatched on the
    first letter of the element name.  Output goes to the intermediate MVG
    stream (svg_info->file).
  */
  switch (*name)
  {
    case 'C':
    case 'c':
    {
      if (LocaleCompare((const char *) name,"circle") == 0)
        {
          /* Circle is drawn via its center and a point on the perimeter. */
          (void) FormatLocaleFile(svg_info->file,"class \"circle\"\n");
          (void) FormatLocaleFile(svg_info->file,"circle %g,%g %g,%g\n",
            svg_info->element.cx,svg_info->element.cy,svg_info->element.cx,
            svg_info->element.cy+svg_info->element.minor);
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      if (LocaleCompare((const char *) name,"clipPath") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop clip-path\n");
          break;
        }
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleCompare((const char *) name,"defs") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop defs\n");
          break;
        }
      if (LocaleCompare((const char *) name,"desc") == 0)
        {
          register char
            *p;

          if (*svg_info->text == '\0')
            break;
          /* Write the accumulated description as '#'-prefixed MVG comment
             lines (a '#' is re-emitted after every embedded newline). */
          (void) fputc('#',svg_info->file);
          for (p=svg_info->text; *p != '\0'; p++)
          {
            (void) fputc(*p,svg_info->file);
            if (*p == '\n')
              (void) fputc('#',svg_info->file);
          }
          (void) fputc('\n',svg_info->file);
          *svg_info->text='\0';
          break;
        }
      break;
    }
    case 'E':
    case 'e':
    {
      if (LocaleCompare((const char *) name,"ellipse") == 0)
        {
          double
            angle;

          (void) FormatLocaleFile(svg_info->file,"class \"ellipse\"\n");
          /* When the ellipse carries a rotation, major/minor are swapped. */
          angle=svg_info->element.angle;
          (void) FormatLocaleFile(svg_info->file,"ellipse %g,%g %g,%g 0,360\n",
            svg_info->element.cx,svg_info->element.cy,
            angle == 0.0 ? svg_info->element.major : svg_info->element.minor,
            angle == 0.0 ? svg_info->element.minor : svg_info->element.major);
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      break;
    }
    case 'F':
    case 'f':
    {
      if (LocaleCompare((const char *) name,"foreignObject") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare((const char *) name,"g") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if (LocaleCompare((const char *) name,"image") == 0)
        {
          /* NOTE(review): svg_info->url is interpolated into the MVG stream
             inside quotes without escaping -- confirm upstream sanitizes. */
          (void) FormatLocaleFile(svg_info->file,
            "image Over %g,%g %g,%g \"%s\"\n",svg_info->bounds.x,
            svg_info->bounds.y,svg_info->bounds.width,svg_info->bounds.height,
            svg_info->url);
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare((const char *) name,"line") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"class \"line\"\n");
          (void) FormatLocaleFile(svg_info->file,"line %g,%g %g,%g\n",
            svg_info->segment.x1,svg_info->segment.y1,svg_info->segment.x2,
            svg_info->segment.y2);
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      if (LocaleCompare((const char *) name,"linearGradient") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop gradient\n");
          break;
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleCompare((const char *) name,"mask") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop mask\n");
          break;
        }
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare((const char *) name,"pattern") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop pattern\n");
          break;
        }
      if (LocaleCompare((const char *) name,"path") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"class \"path\"\n");
          (void) FormatLocaleFile(svg_info->file,"path \"%s\"\n",
            svg_info->vertices);
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      if (LocaleCompare((const char *) name,"polygon") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"class \"polygon\"\n");
          (void) FormatLocaleFile(svg_info->file,"polygon %s\n",
            svg_info->vertices);
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare((const char *) name,"radialGradient") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop gradient\n");
          break;
        }
      if (LocaleCompare((const char *) name,"rect") == 0)
        {
          if ((svg_info->radius.x == 0.0) && (svg_info->radius.y == 0.0))
            {
              (void) FormatLocaleFile(svg_info->file,"class \"rect\"\n");
              /* A 1x1 rectangle degenerates to a single point primitive. */
              if ((fabs(svg_info->bounds.width-1.0) < MagickEpsilon) &&
                  (fabs(svg_info->bounds.height-1.0) < MagickEpsilon))
                (void) FormatLocaleFile(svg_info->file,"point %g,%g\n",
                  svg_info->bounds.x,svg_info->bounds.y);
              else
                (void) FormatLocaleFile(svg_info->file,
                  "rectangle %g,%g %g,%g\n",svg_info->bounds.x,
                  svg_info->bounds.y,svg_info->bounds.x+svg_info->bounds.width,
                  svg_info->bounds.y+svg_info->bounds.height);
              (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
              break;
            }
          /* Rounded corners: a missing radius defaults to the other one,
             per the SVG spec's rx/ry fallback rule. */
          if (svg_info->radius.x == 0.0)
            svg_info->radius.x=svg_info->radius.y;
          if (svg_info->radius.y == 0.0)
            svg_info->radius.y=svg_info->radius.x;
          (void) FormatLocaleFile(svg_info->file,
            "roundRectangle %g,%g %g,%g %g,%g\n",
            svg_info->bounds.x,svg_info->bounds.y,svg_info->bounds.x+
            svg_info->bounds.width,svg_info->bounds.y+svg_info->bounds.height,
            svg_info->radius.x,svg_info->radius.y);
          svg_info->radius.x=0.0;
          svg_info->radius.y=0.0;
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare((const char *) name,"stop") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"stop-color \"%s\" %s\n",
            svg_info->stop_color,svg_info->offset);
          break;
        }
      if (LocaleCompare((char *) name,"style") == 0)
        {
          char
            *keyword,
            **tokens,
            *value;

          register ssize_t
            j;

          size_t
            number_tokens;

          /*
            Find style definitions in svg_info->text.
          */
          tokens=SVGKeyValuePairs(context,'{','}',svg_info->text,
            &number_tokens);
          if (tokens == (char **) NULL)
            break;
          /* Tokens alternate selector/declaration; a leading '.' (class
             selector) is stripped before emitting the MVG class name. */
          for (j=0; j < (ssize_t) (number_tokens-1); j+=2)
          {
            keyword=(char *) tokens[j];
            value=(char *) tokens[j+1];
            /* NOTE(review): keyword originates from untrusted SVG and is
               interpolated into the MVG stream unescaped inside quotes --
               potential MVG injection; confirm sanitization elsewhere. */
            (void) FormatLocaleFile(svg_info->file,"push class \"%s\"\n",
              *keyword == '.' ? keyword+1 : keyword);
            SVGProcessStyleElement(context,name,value);
            (void) FormatLocaleFile(svg_info->file,"pop class\n");
          }
          for (j=0; tokens[j] != (char *) NULL; j++)
            tokens[j]=DestroyString(tokens[j]);
          tokens=(char **) RelinquishMagickMemory(tokens);
          break;
        }
      if (LocaleCompare((const char *) name,"svg") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          svg_info->svgDepth--;
          break;
        }
      if (LocaleCompare((const char *) name,"symbol") == 0)
        {
          (void) FormatLocaleFile(svg_info->file,"pop symbol\n");
          break;
        }
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare((const char *) name,"text") == 0)
        {
          if (*svg_info->text != '\0')
            {
              char
                *text;

              /* Strip and escape '"' so the accumulated character data is a
                 valid quoted MVG string. */
              SVGStripString(MagickTrue,svg_info->text);
              text=EscapeString(svg_info->text,'\"');
              (void) FormatLocaleFile(svg_info->file,"text %g,%g \"%s\"\n",
                svg_info->text_offset.x,svg_info->text_offset.y,text);
              text=DestroyString(text);
              *svg_info->text='\0';
            }
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      if (LocaleCompare((const char *) name,"tspan") == 0)
        {
          if (*svg_info->text != '\0')
            {
              char
                *text;

              (void) FormatLocaleFile(svg_info->file,"class \"tspan\"\n");
              text=EscapeString(svg_info->text,'\"');
              (void) FormatLocaleFile(svg_info->file,"text %g,%g \"%s\"\n",
                svg_info->bounds.x,svg_info->bounds.y,text);
              text=DestroyString(text);
              *svg_info->text='\0';
            }
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      if (LocaleCompare((const char *) name,"title") == 0)
        {
          if (*svg_info->text == '\0')
            break;
          /* Title text is kept on svg_info rather than written to MVG. */
          (void) CloneString(&svg_info->title,svg_info->text);
          *svg_info->text='\0';
          break;
        }
      break;
    }
    case 'U':
    case 'u':
    {
      if (LocaleCompare((char *) name,"use") == 0)
        {
          /* Position the referenced object, then draw it by URL. */
          if ((svg_info->bounds.x != 0.0) || (svg_info->bounds.y != 0.0))
            (void) FormatLocaleFile(svg_info->file,"translate %g,%g\n",
              svg_info->bounds.x,svg_info->bounds.y);
          /* NOTE(review): svg_info->url is interpolated unescaped inside a
             quoted MVG string -- confirm upstream sanitizes. */
          (void) FormatLocaleFile(svg_info->file,"use \"url(%s)\"\n",
            svg_info->url);
          (void) FormatLocaleFile(svg_info->file,"pop graphic-context\n");
          break;
        }
      break;
    }
    default:
      break;
  }
  /* Reset per-element state before the next element is processed. */
  *svg_info->text='\0';
  (void) memset(&svg_info->element,0,sizeof(svg_info->element));
  (void) memset(&svg_info->segment,0,sizeof(svg_info->segment));
  svg_info->n--;
}

/*
  SAX callback: receive character data from the parser and append it to the
  svg_info->text accumulator (stripped of surrounding whitespace).
*/
static void SVGCharacters(void *context,const xmlChar *c,int length)
{
  char
    *text;

  register char
    *p;

  register ssize_t
    i;

  SVGInfo
    *svg_info;

  /*
    Receiving some characters from the parser.
  */
  /* NOTE(review): per the SAX contract c need not be NUL-terminated; logging
     it with %s may read past the supplied length -- confirm. */
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " SAX.characters(%s,%.20g)",c,(double) length);
  svg_info=(SVGInfo *) context;
  /* NOTE(review): length+1 is computed in int arithmetic; assumes
     length < INT_MAX -- verify libxml2 guarantees this. */
  text=(char *) AcquireQuantumMemory(length+1,sizeof(*text));
  if (text == (char *) NULL)
    return;
  /* Copy exactly length bytes, then terminate. */
  p=text;
  for (i=0; i < (ssize_t) length; i++)
    *p++=c[i];
  *p='\0';
  SVGStripString(MagickFalse,text);
  if (svg_info->text == (char *) NULL)
    svg_info->text=text;
  else
    {
      (void) ConcatenateString(&svg_info->text,text);
      text=DestroyString(text);
    }
}

/*
  SAX callback: an entity reference was detected; attach the corresponding
  reference node to the current document node.
*/
static void SVGReference(void *context,const xmlChar *name)
{
  SVGInfo
    *svg_info;

  xmlParserCtxtPtr
    parser;

  /*
    Called when an entity reference is detected.
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.reference(%s)",
    name);
  svg_info=(SVGInfo *) context;
  parser=svg_info->parser;
  if (parser == (xmlParserCtxtPtr) NULL)
    return;
  if (parser->node == (xmlNodePtr) NULL)
    return;
  /* A leading '#' marks a character reference; otherwise a named entity. */
  if (*name == '#')
    (void) xmlAddChild(parser->node,xmlNewCharRef(svg_info->document,name));
  else
    (void) xmlAddChild(parser->node,xmlNewReference(svg_info->document,name));
}

/* SAX callback: ignorable whitespace is logged and otherwise discarded. */
static void SVGIgnorableWhitespace(void *context,const xmlChar *c,int length)
{
  SVGInfo
    *svg_info;

  /*
    Receiving some ignorable whitespaces from the parser.
*/
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " SAX.ignorableWhitespace(%.30s, %d)",c,length);
  svg_info=(SVGInfo *) context;
  (void) svg_info;
}

/* SAX callback: processing instructions are logged and otherwise ignored. */
static void SVGProcessingInstructions(void *context,const xmlChar *target,
  const xmlChar *data)
{
  SVGInfo
    *svg_info;

  /*
    A processing instruction has been parsed.
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " SAX.processingInstruction(%s, %s)",target,data);
  svg_info=(SVGInfo *) context;
  (void) svg_info;
}

/*
  SAX callback: accumulate XML comments into svg_info->comment, with a
  newline between successive comments.
*/
static void SVGComment(void *context,const xmlChar *value)
{
  SVGInfo
    *svg_info;

  /*
    A comment has been parsed.
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.comment(%s)",
    value);
  svg_info=(SVGInfo *) context;
  if (svg_info->comment != (char *) NULL)
    (void) ConcatenateString(&svg_info->comment,"\n");
  (void) ConcatenateString(&svg_info->comment,(const char *) value);
}

/* Forward declaration carries the printf-style format attribute. */
static void SVGWarning(void *,const char *,...)
  magick_attribute((__format__ (__printf__,2,3)));

/*
  SAX callback: format a parser warning and raise it as a DelegateWarning on
  the coder's exception, preserving the system errno message.
*/
static void SVGWarning(void *context,const char *format,...)
{
  char
    *message,
    reason[MagickPathExtent];

  SVGInfo
    *svg_info;

  va_list
    operands;

  /*
    Display and format a warning message; gives file, line, position and
    extra parameters.
  */
  va_start(operands,format);
  svg_info=(SVGInfo *) context;
  (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.warning: ");
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),format,operands);
#if !defined(MAGICKCORE_HAVE_VSNPRINTF)
  (void) vsprintf(reason,format,operands);
#else
  (void) vsnprintf(reason,MagickPathExtent,format,operands);
#endif
  message=GetExceptionMessage(errno);
  (void) ThrowMagickException(svg_info->exception,GetMagickModule(),
    DelegateWarning,reason,"`%s`",message);
  message=DestroyString(message);
  va_end(operands);
}

/* Forward declaration carries the printf-style format attribute. */
static void SVGError(void *,const char *,...)
  magick_attribute((__format__ (__printf__,2,3)));

static void SVGError(void *context,const char *format,...)
{
  char
    *message,
    reason[MagickPathExtent];

  SVGInfo
    *svg_info;

  va_list
    operands;

  /*
    Display and format an error message; gives file, line, position and
    extra parameters.
  */
  va_start(operands,format);
  svg_info=(SVGInfo *) context;
  (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.error: ");
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),format,operands);
#if !defined(MAGICKCORE_HAVE_VSNPRINTF)
  (void) vsprintf(reason,format,operands);
#else
  (void) vsnprintf(reason,MagickPathExtent,format,operands);
#endif
  message=GetExceptionMessage(errno);
  (void) ThrowMagickException(svg_info->exception,GetMagickModule(),CoderError,
    reason,"`%s`",message);
  message=DestroyString(message);
  va_end(operands);
}

/*
  SAX callback: a CDATA block was parsed; merge it into the last child when
  that child is already a CDATA section, otherwise append a new CDATA node.
*/
static void SVGCDataBlock(void *context,const xmlChar *value,int length)
{
  SVGInfo
    *svg_info;

  xmlNodePtr
    child;

  xmlParserCtxtPtr
    parser;

  /*
    Called when a pcdata block has been parsed.
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule()," SAX.pcdata(%s, %d)",
    value,length);
  svg_info=(SVGInfo *) context;
  parser=svg_info->parser;
  /* NOTE(review): parser and parser->node are not NULL-checked here, unlike
     SVGReference above -- confirm this callback cannot fire without a node. */
  child=xmlGetLastChild(parser->node);
  if ((child != (xmlNodePtr) NULL) &&
      (child->type == XML_CDATA_SECTION_NODE))
    {
      xmlTextConcat(child,value,length);
      return;
    }
  (void) xmlAddChild(parser->node,xmlNewCDataBlock(parser->myDoc,value,length));
}

/*
  SAX callback: parse an external DTD subset by temporarily installing a
  fresh input stack on the shared parser context.
*/
static void SVGExternalSubset(void *context,const xmlChar *name,
  const xmlChar *external_id,const xmlChar *system_id)
{
  SVGInfo
    *svg_info;

  xmlParserCtxt
    parser_context;

  xmlParserCtxtPtr
    parser;

  xmlParserInputPtr
    input;

  /*
    Does this document have an external subset?
  */
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
    " SAX.externalSubset(%s, %s, %s)",name,
    (external_id != (const xmlChar *) NULL ?
      (const char *) external_id : "none"),
    (system_id != (const xmlChar *) NULL ?
      (const char *) system_id : "none"));
  svg_info=(SVGInfo *) context;
  parser=svg_info->parser;
  /* Only act when validating a well-formed document that actually declares
     an external subset. */
  if (((external_id == NULL) && (system_id == NULL)) ||
      ((parser->validate == 0) || (parser->wellFormed == 0) ||
       (svg_info->document == 0)))
    return;
  input=SVGResolveEntity(context,external_id,system_id);
  if (input == NULL)
    return;
  /*
    Save the parser's input stack in parser_context, install a fresh stack
    for the external subset, and restore the original stack afterwards.
  */
  (void) xmlNewDtd(svg_info->document,name,external_id,system_id);
  parser_context=(*parser);
  parser->inputTab=(xmlParserInputPtr *) xmlMalloc(5*sizeof(*parser->inputTab));
  if (parser->inputTab == (xmlParserInputPtr *) NULL)
    {
      /* Allocation failed: flag the error and restore the saved stack. */
      parser->errNo=XML_ERR_NO_MEMORY;
      parser->input=parser_context.input;
      parser->inputNr=parser_context.inputNr;
      parser->inputMax=parser_context.inputMax;
      parser->inputTab=parser_context.inputTab;
      return;
    }
  parser->inputNr=0;
  parser->inputMax=5;
  parser->input=NULL;
  xmlPushInput(parser,input);
  /* Detect the subset's encoding from its first four bytes. */
  (void) xmlSwitchEncoding(parser,xmlDetectCharEncoding(parser->input->cur,4));
  if (input->filename == (char *) NULL)
    input->filename=(char *) xmlStrdup(system_id);
  input->line=1;
  input->col=1;
  input->base=parser->input->cur;
  input->cur=parser->input->cur;
  input->free=NULL;
  xmlParseExternalSubset(parser,external_id,system_id);
  while (parser->inputNr > 1)
    (void) xmlPopInput(parser);
  xmlFreeInputStream(parser->input);
  xmlFree(parser->inputTab);
  /* Restore the caller's input stack. */
  parser->input=parser_context.input;
  parser->inputNr=parser_context.inputNr;
  parser->inputMax=parser_context.inputMax;
  parser->inputTab=parser_context.inputTab;
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static Image *ReadSVGImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  FILE
    *file;

  Image
    *image,
    *next;

  int
    status,
    unique_file;

  ssize_t
    n;

  SVGInfo
    *svg_info;

  unsigned char
    message[MagickPathExtent];

  xmlSAXHandler
    sax_modules;

  xmlSAXHandlerPtr
    sax_handler;

  /*
    Open image file.
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } if ((fabs(image->resolution.x) < MagickEpsilon) || (fabs(image->resolution.y) < MagickEpsilon)) { GeometryInfo geometry_info; int flags; flags=ParseGeometry(SVGDensityGeometry,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } if (LocaleCompare(image_info->magick,"MSVG") != 0) { Image *svg_image; svg_image=RenderSVGImage(image_info,image,exception); if (svg_image != (Image *) NULL) { image=DestroyImageList(image); return(svg_image); } { #if defined(MAGICKCORE_RSVG_DELEGATE) #if defined(MAGICKCORE_CAIRO_DELEGATE) cairo_surface_t *cairo_surface; cairo_t *cairo_image; MagickBooleanType apply_density; MemoryInfo *pixel_info; register unsigned char *p; RsvgDimensionData dimension_info; unsigned char *pixels; #else GdkPixbuf *pixel_buffer; register const guchar *p; #endif GError *error; PixelInfo fill_color; register ssize_t x; register Quantum *q; RsvgHandle *svg_handle; ssize_t y; svg_handle=rsvg_handle_new(); if (svg_handle == (RsvgHandle *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); rsvg_handle_set_base_uri(svg_handle,image_info->filename); if ((fabs(image->resolution.x) > MagickEpsilon) && (fabs(image->resolution.y) > MagickEpsilon)) rsvg_handle_set_dpi_x_y(svg_handle,image->resolution.x, image->resolution.y); while ((n=ReadBlob(image,MagickPathExtent-1,message)) != 0) { message[n]='\0'; error=(GError *) NULL; 
(void) rsvg_handle_write(svg_handle,message,n,&error); if (error != (GError *) NULL) g_error_free(error); } error=(GError *) NULL; rsvg_handle_close(svg_handle,&error); if (error != (GError *) NULL) g_error_free(error); #if defined(MAGICKCORE_CAIRO_DELEGATE) apply_density=MagickTrue; rsvg_handle_get_dimensions(svg_handle,&dimension_info); if ((image->resolution.x > 0.0) && (image->resolution.y > 0.0)) { RsvgDimensionData dpi_dimension_info; /* We should not apply the density when the internal 'factor' is 'i'. This can be checked by using the trick below. */ rsvg_handle_set_dpi_x_y(svg_handle,image->resolution.x*256, image->resolution.y*256); rsvg_handle_get_dimensions(svg_handle,&dpi_dimension_info); if ((dpi_dimension_info.width != dimension_info.width) || (dpi_dimension_info.height != dimension_info.height)) apply_density=MagickFalse; rsvg_handle_set_dpi_x_y(svg_handle,image->resolution.x, image->resolution.y); } if (image_info->size != (char *) NULL) { (void) GetGeometry(image_info->size,(ssize_t *) NULL, (ssize_t *) NULL,&image->columns,&image->rows); if ((image->columns != 0) || (image->rows != 0)) { image->resolution.x=DefaultSVGDensity*image->columns/ dimension_info.width; image->resolution.y=DefaultSVGDensity*image->rows/ dimension_info.height; if (fabs(image->resolution.x) < MagickEpsilon) image->resolution.x=image->resolution.y; else if (fabs(image->resolution.y) < MagickEpsilon) image->resolution.y=image->resolution.x; else image->resolution.x=image->resolution.y=MagickMin( image->resolution.x,image->resolution.y); apply_density=MagickTrue; } } if (apply_density != MagickFalse) { image->columns=image->resolution.x*dimension_info.width/ DefaultSVGDensity; image->rows=image->resolution.y*dimension_info.height/ DefaultSVGDensity; } else { image->columns=dimension_info.width; image->rows=dimension_info.height; } pixel_info=(MemoryInfo *) NULL; #else pixel_buffer=rsvg_handle_get_pixbuf(svg_handle); rsvg_handle_free(svg_handle); 
image->columns=gdk_pixbuf_get_width(pixel_buffer); image->rows=gdk_pixbuf_get_height(pixel_buffer); #endif image->alpha_trait=BlendPixelTrait; if (image_info->ping == MagickFalse) { #if defined(MAGICKCORE_CAIRO_DELEGATE) size_t stride; #endif status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) { #if !defined(MAGICKCORE_CAIRO_DELEGATE) g_object_unref(G_OBJECT(pixel_buffer)); #endif g_object_unref(svg_handle); ThrowReaderException(MissingDelegateError, "NoDecodeDelegateForThisImageFormat"); } #if defined(MAGICKCORE_CAIRO_DELEGATE) stride=4*image->columns; #if defined(MAGICKCORE_PANGOCAIRO_DELEGATE) stride=(size_t) cairo_format_stride_for_width(CAIRO_FORMAT_ARGB32, (int) image->columns); #endif pixel_info=AcquireVirtualMemory(stride,image->rows*sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) { g_object_unref(svg_handle); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); #endif (void) SetImageBackgroundColor(image,exception); #if defined(MAGICKCORE_CAIRO_DELEGATE) cairo_surface=cairo_image_surface_create_for_data(pixels, CAIRO_FORMAT_ARGB32,(int) image->columns,(int) image->rows,(int) stride); if ((cairo_surface == (cairo_surface_t *) NULL) || (cairo_surface_status(cairo_surface) != CAIRO_STATUS_SUCCESS)) { if (cairo_surface != (cairo_surface_t *) NULL) cairo_surface_destroy(cairo_surface); pixel_info=RelinquishVirtualMemory(pixel_info); g_object_unref(svg_handle); ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } cairo_image=cairo_create(cairo_surface); cairo_set_operator(cairo_image,CAIRO_OPERATOR_CLEAR); cairo_paint(cairo_image); cairo_set_operator(cairo_image,CAIRO_OPERATOR_OVER); if (apply_density != MagickFalse) cairo_scale(cairo_image,image->resolution.x/DefaultSVGDensity, image->resolution.y/DefaultSVGDensity); rsvg_handle_render_cairo(svg_handle,cairo_image); cairo_destroy(cairo_image); 
cairo_surface_destroy(cairo_surface); g_object_unref(svg_handle); p=pixels; #else p=gdk_pixbuf_get_pixels(pixel_buffer); #endif GetPixelInfo(image,&fill_color); for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { #if defined(MAGICKCORE_CAIRO_DELEGATE) fill_color.blue=ScaleCharToQuantum(*p++); fill_color.green=ScaleCharToQuantum(*p++); fill_color.red=ScaleCharToQuantum(*p++); #else fill_color.red=ScaleCharToQuantum(*p++); fill_color.green=ScaleCharToQuantum(*p++); fill_color.blue=ScaleCharToQuantum(*p++); #endif fill_color.alpha=ScaleCharToQuantum(*p++); #if defined(MAGICKCORE_CAIRO_DELEGATE) { double gamma; gamma=QuantumScale*fill_color.alpha; gamma=PerceptibleReciprocal(gamma); fill_color.blue*=gamma; fill_color.green*=gamma; fill_color.red*=gamma; } #endif CompositePixelOver(image,&fill_color,fill_color.alpha,q,(double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } #if defined(MAGICKCORE_CAIRO_DELEGATE) if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); #else g_object_unref(G_OBJECT(pixel_buffer)); #endif (void) CloseBlob(image); for (next=GetFirstImageInList(image); next != (Image *) NULL; ) { (void) CopyMagickString(next->filename,image->filename,MaxTextExtent); (void) CopyMagickString(next->magick,image->magick,MaxTextExtent); next=GetNextImageInList(next); } return(GetFirstImageInList(image)); #endif } } /* Open draw file. 
*/ file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(filename); if (unique_file != -1) file=fdopen(unique_file,"w"); if ((unique_file == -1) || (file == (FILE *) NULL)) { (void) CopyMagickString(image->filename,filename,MagickPathExtent); ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile", image->filename); image=DestroyImageList(image); return((Image *) NULL); } /* Parse SVG file. */ svg_info=AcquireSVGInfo(); if (svg_info == (SVGInfo *) NULL) { (void) fclose(file); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } svg_info->file=file; svg_info->exception=exception; svg_info->image=image; svg_info->image_info=image_info; svg_info->bounds.width=image->columns; svg_info->bounds.height=image->rows; svg_info->svgDepth=0; if (image_info->size != (char *) NULL) (void) CloneString(&svg_info->size,image_info->size); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"begin SAX"); xmlInitParser(); (void) xmlSubstituteEntitiesDefault(1); (void) memset(&sax_modules,0,sizeof(sax_modules)); sax_modules.internalSubset=SVGInternalSubset; sax_modules.isStandalone=SVGIsStandalone; sax_modules.hasInternalSubset=SVGHasInternalSubset; sax_modules.hasExternalSubset=SVGHasExternalSubset; sax_modules.resolveEntity=SVGResolveEntity; sax_modules.getEntity=SVGGetEntity; sax_modules.entityDecl=SVGEntityDeclaration; sax_modules.notationDecl=SVGNotationDeclaration; sax_modules.attributeDecl=SVGAttributeDeclaration; sax_modules.elementDecl=SVGElementDeclaration; sax_modules.unparsedEntityDecl=SVGUnparsedEntityDeclaration; sax_modules.setDocumentLocator=SVGSetDocumentLocator; sax_modules.startDocument=SVGStartDocument; sax_modules.endDocument=SVGEndDocument; sax_modules.startElement=SVGStartElement; sax_modules.endElement=SVGEndElement; sax_modules.reference=SVGReference; sax_modules.characters=SVGCharacters; sax_modules.ignorableWhitespace=SVGIgnorableWhitespace; 
sax_modules.processingInstruction=SVGProcessingInstructions; sax_modules.comment=SVGComment; sax_modules.warning=SVGWarning; sax_modules.error=SVGError; sax_modules.fatalError=SVGError; sax_modules.getParameterEntity=SVGGetParameterEntity; sax_modules.cdataBlock=SVGCDataBlock; sax_modules.externalSubset=SVGExternalSubset; sax_handler=(&sax_modules); n=ReadBlob(image,MagickPathExtent-1,message); message[n]='\0'; if (n > 0) { const char *value; svg_info->parser=xmlCreatePushParserCtxt(sax_handler,svg_info,(char *) message,n,image->filename); value=GetImageOption(image_info,"svg:xml-parse-huge"); if ((value != (char *) NULL) && (IsStringTrue(value) != MagickFalse)) (void) xmlCtxtUseOptions(svg_info->parser,XML_PARSE_HUGE); while ((n=ReadBlob(image,MagickPathExtent-1,message)) != 0) { message[n]='\0'; status=xmlParseChunk(svg_info->parser,(char *) message,(int) n,0); if (status != 0) break; } } (void) xmlParseChunk(svg_info->parser,(char *) message,0,1); SVGEndDocument(svg_info); xmlFreeParserCtxt(svg_info->parser); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"end SAX"); (void) fclose(file); (void) CloseBlob(image); image->columns=svg_info->width; image->rows=svg_info->height; if (exception->severity >= ErrorException) { svg_info=DestroySVGInfo(svg_info); (void) RelinquishUniqueFileResource(filename); image=DestroyImage(image); return((Image *) NULL); } if (image_info->ping == MagickFalse) { ImageInfo *read_info; /* Draw image. */ image=DestroyImage(image); image=(Image *) NULL; read_info=CloneImageInfo(image_info); SetImageInfoBlob(read_info,(void *) NULL,0); (void) FormatLocaleString(read_info->filename,MagickPathExtent,"mvg:%s", filename); image=ReadImage(read_info,exception); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); } /* Relinquish resources. 
*/
  if (image != (Image *) NULL)
    {
      /*
        Propagate the <title>/<desc> metadata collected by the SAX handlers
        onto the decoded image as "svg:title"/"svg:comment" properties.
      */
      if (svg_info->title != (char *) NULL)
        (void) SetImageProperty(image,"svg:title",svg_info->title,exception);
      if (svg_info->comment != (char *) NULL)
        (void) SetImageProperty(image,"svg:comment",svg_info->comment,
          exception);
    }
  /*
    Stamp every frame in the list with the coder's filename and magick so
    downstream consumers see a consistent identity.  GetFirstImageInList()
    tolerates a NULL image, so the loop is simply skipped on failure.
  */
  for (next=GetFirstImageInList(image); next != (Image *) NULL; )
  {
    (void) CopyMagickString(next->filename,image->filename,MaxTextExtent);
    (void) CopyMagickString(next->magick,image->magick,MaxTextExtent);
    next=GetNextImageInList(next);
  }
  svg_info=DestroySVGInfo(svg_info);
  (void) RelinquishUniqueFileResource(filename);
  return(GetFirstImageInList(image));
}
#else
/*
  Fallback reader used when MAGICKCORE_XML_DELEGATE is not compiled in:
  establish a default rendering density if the caller supplied none, then
  delegate the actual rasterization to RenderSVGImage().
*/
static Image *ReadSVGImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image,
    *svg_image;

  MagickBooleanType
    status;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  if ((fabs(image->resolution.x) < MagickEpsilon) ||
      (fabs(image->resolution.y) < MagickEpsilon))
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      /*
        No caller-supplied density: fall back to the coder default
        (SVGDensityGeometry); if only one value is given use it for both axes.
      */
      flags=ParseGeometry(SVGDensityGeometry,&geometry_info);
      image->resolution.x=geometry_info.rho;
      image->resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->resolution.y=image->resolution.x;
    }
  svg_image=RenderSVGImage(image_info,image,exception);
  image=DestroyImage(image);
  return(svg_image);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r S V G I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterSVGImage() adds attributes
for the SVG image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterSVGImage method is:
%
%      size_t RegisterSVGImage(void)
%
*/
ModuleExport size_t RegisterSVGImage(void)
{
  char
    version[MagickPathExtent];

  MagickInfo
    *entry;

  /*
    Build a version string naming the delegate libraries this coder was
    compiled against (libxml2 and/or librsvg).
  */
  *version='\0';
#if defined(LIBXML_DOTTED_VERSION)
  (void) CopyMagickString(version,"XML " LIBXML_DOTTED_VERSION,
    MagickPathExtent);
#endif
#if defined(MAGICKCORE_RSVG_DELEGATE)
#if !GLIB_CHECK_VERSION(2,35,0)
  /* Older GLib requires explicit type-system initialization. */
  g_type_init();
#endif
  (void) FormatLocaleString(version,MagickPathExtent,"RSVG %d.%d.%d",
    LIBRSVG_MAJOR_VERSION,LIBRSVG_MINOR_VERSION,LIBRSVG_MICRO_VERSION);
#endif
  /*
    Register three format entries: SVG, SVGZ (compressed), and MSVG (the
    internal renderer).  The flag XORs toggle bits relative to the defaults
    set by AcquireMagickInfo(); NOTE(review): this assumes the default has
    blob-support (and decoder-thread-support) set, so the XOR clears them --
    confirm against MagickInfo defaults.
  */
  entry=AcquireMagickInfo("SVG","SVG","Scalable Vector Graphics");
  entry->decoder=(DecodeImageHandler *) ReadSVGImage;
  entry->encoder=(EncodeImageHandler *) WriteSVGImage;
  entry->flags^=CoderBlobSupportFlag;
#if defined(MAGICKCORE_RSVG_DELEGATE)
  entry->flags^=CoderDecoderThreadSupportFlag;
#endif
  entry->mime_type=ConstantString("image/svg+xml");
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->magick=(IsImageFormatHandler *) IsSVG;
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("SVG","SVGZ","Compressed Scalable Vector Graphics");
#if defined(MAGICKCORE_XML_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadSVGImage;
#endif
  entry->encoder=(EncodeImageHandler *) WriteSVGImage;
  entry->flags^=CoderBlobSupportFlag;
#if defined(MAGICKCORE_RSVG_DELEGATE)
  entry->flags^=CoderDecoderThreadSupportFlag;
#endif
  entry->mime_type=ConstantString("image/svg+xml");
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->magick=(IsImageFormatHandler *) IsSVG;
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("SVG","MSVG",
    "ImageMagick's own SVG internal renderer");
#if defined(MAGICKCORE_XML_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadSVGImage;
#endif
  entry->encoder=(EncodeImageHandler *) WriteSVGImage;
  entry->flags^=CoderBlobSupportFlag;
#if defined(MAGICKCORE_RSVG_DELEGATE)
  entry->flags^=CoderDecoderThreadSupportFlag;
#endif
  entry->magick=(IsImageFormatHandler *) IsSVG;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r S V G I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterSVGImage() removes format registrations made by the
%  SVG module from the list of supported formats.
%
%  The format of the UnregisterSVGImage method is:
%
%      UnregisterSVGImage(void)
%
*/
ModuleExport void UnregisterSVGImage(void)
{
  /* Remove all three entries registered by RegisterSVGImage(). */
  (void) UnregisterMagickInfo("SVGZ");
  (void) UnregisterMagickInfo("SVG");
  (void) UnregisterMagickInfo("MSVG");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e S V G I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteSVGImage() writes a image in the SVG - XML based W3C standard
%  format.
%
%  The format of the WriteSVGImage method is:
%
%      MagickBooleanType WriteSVGImage(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  AffineToTransform() closes the pending style attribute (`\">`) and, when the
  affine matrix is not the identity, emits the simplest equivalent SVG
  transform attribute: scale(), rotate(), translate(), or the general
  matrix() form.  The checks use MagickEpsilon tolerances to classify the
  matrix; the rotate() case additionally requires sx==sy, rx==-ry and a
  unit-magnitude column (a pure rotation).
*/
static void AffineToTransform(Image *image,AffineMatrix *affine)
{
  char
    transform[MagickPathExtent];

  if ((fabs(affine->tx) < MagickEpsilon) && (fabs(affine->ty) < MagickEpsilon))
    {
      /* No translation component. */
      if ((fabs(affine->rx) < MagickEpsilon) &&
          (fabs(affine->ry) < MagickEpsilon))
        {
          /* No shear/rotation: identity or pure scale. */
          if ((fabs(affine->sx-1.0) < MagickEpsilon) &&
              (fabs(affine->sy-1.0) < MagickEpsilon))
            {
              /* Identity: just close the attribute, no transform needed. */
              (void) WriteBlobString(image,"\">\n");
              return;
            }
          (void) FormatLocaleString(transform,MagickPathExtent,
            "\" transform=\"scale(%g,%g)\">\n",affine->sx,affine->sy);
          (void) WriteBlobString(image,transform);
          return;
        }
      else
        {
          /* Pure rotation test: orthonormal matrix with equal diagonals. */
          if ((fabs(affine->sx-affine->sy) < MagickEpsilon) &&
              (fabs(affine->rx+affine->ry) < MagickEpsilon) &&
              (fabs(affine->sx*affine->sx+affine->rx*affine->rx-1.0) <
               2*MagickEpsilon))
            {
              double
                theta;

              /* Recover the rotation angle in degrees. */
              theta=(180.0/MagickPI)*atan2(affine->rx,affine->sx);
              (void) FormatLocaleString(transform,MagickPathExtent,
                "\" transform=\"rotate(%g)\">\n",theta);
              (void) WriteBlobString(image,transform);
              return;
            }
        }
    }
  else
    {
      /* Translation present; pure translate if the linear part is identity. */
      if ((fabs(affine->sx-1.0) < MagickEpsilon) &&
          (fabs(affine->rx) < MagickEpsilon) &&
          (fabs(affine->ry) < MagickEpsilon) &&
          (fabs(affine->sy-1.0) < MagickEpsilon))
        {
          (void) FormatLocaleString(transform,MagickPathExtent,
            "\" transform=\"translate(%g,%g)\">\n",affine->tx,affine->ty);
          (void) WriteBlobString(image,transform);
          return;
        }
    }
  /* General case: emit the full 2x3 affine matrix. */
  (void) FormatLocaleString(transform,MagickPathExtent,
    "\" transform=\"matrix(%g %g %g %g %g %g)\">\n",
    affine->sx,affine->rx,affine->ry,affine->sy,affine->tx,affine->ty);
  (void) WriteBlobString(image,transform);
}

/*
  IsPoint() returns MagickTrue when the string begins with a parseable
  decimal integer (strtol consumed at least one character).
*/
static MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  ssize_t
    value;

  value=(ssize_t) strtol(point,&p,10);
  (void) value;
  return(p != point ?
MagickTrue : MagickFalse);
}

/*
  TraceSVGImage() writes the image as SVG without the MVG vector source:
  either by vectorizing it with the AutoTrace delegate (when compiled in),
  delegating to an external "TRACE" program, or -- as a last resort --
  embedding the raster as a base64-encoded PNG inside a minimal SVG wrapper.
  Always closes the blob and returns MagickTrue on the embedding path.
*/
static MagickBooleanType TraceSVGImage(Image *image,ExceptionInfo *exception)
{
#if defined(MAGICKCORE_AUTOTRACE_DELEGATE)
  {
    at_bitmap_type
      *trace;

    at_fitting_opts_type
      *fitting_options;

    at_output_opts_type
      *output_options;

    at_splines_type
      *splines;

    ImageType
      type;

    register const Quantum
      *p;

    register ssize_t
      i,
      x;

    size_t
      number_planes;

    ssize_t
      y;

    /*
      Trace image and write as SVG: copy pixels into an AutoTrace bitmap
      (1 plane for bilevel/grayscale, 3 for color), fit splines, and write
      them out with the "svg" output handler.
    */
    fitting_options=at_fitting_opts_new();
    output_options=at_output_opts_new();
    (void) SetImageGray(image,exception);
    type=GetImageType(image);
    number_planes=3;
    if ((type == BilevelType) || (type == GrayscaleType))
      number_planes=1;
    trace=at_bitmap_new(image->columns,image->rows,number_planes);
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      p=GetVirtualPixels(image,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        break;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        /* For grayscale, the red channel alone carries the intensity. */
        trace->bitmap[i++]=GetPixelRed(image,p);
        if (number_planes == 3)
          {
            trace->bitmap[i++]=GetPixelGreen(image,p);
            trace->bitmap[i++]=GetPixelBlue(image,p);
          }
        p+=GetPixelChannels(image);
      }
    }
    splines=at_splines_new_full(trace,fitting_options,NULL,NULL,NULL,NULL,NULL,
      NULL);
    at_splines_write(at_output_get_handler_by_suffix((char *) "svg"),
      GetBlobFileHandle(image),image->filename,output_options,splines,NULL,
      NULL);
    /*
      Free resources.
    */
    at_splines_free(splines);
    at_bitmap_free(trace);
    at_output_opts_free(output_options);
    at_fitting_opts_free(fitting_options);
  }
#else
  {
    char
      *base64,
      filename[MagickPathExtent],
      message[MagickPathExtent];

    const DelegateInfo
      *delegate_info;

    Image
      *clone_image;

    ImageInfo
      *image_info;

    MagickBooleanType
      status;

    register char
      *p;

    size_t
      blob_length,
      encode_length;

    ssize_t
      i;

    unsigned char
      *blob;

    delegate_info=GetDelegateInfo((char *) NULL,"TRACE",exception);
    if (delegate_info != (DelegateInfo *) NULL)
      {
        /*
          Trace SVG with tracing delegate.
          NOTE(review): image_info was just acquired, so image_info->filename
          is empty here and `filename` becomes "trace:"; presumably
          image->filename was intended -- confirm against upstream.
        */
        image_info=AcquireImageInfo();
        (void) CopyMagickString(image_info->magick,"TRACE",MagickPathExtent);
        (void) FormatLocaleString(filename,MagickPathExtent,"trace:%s",
          image_info->filename);
        (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
        status=WriteImage(image_info,image,exception);
        image_info=DestroyImageInfo(image_info);
        return(status);
      }
    /*
      No delegate: wrap the raster as a base64-encoded PNG in a minimal SVG.
    */
    (void) WriteBlobString(image,
      "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>\n");
    (void) WriteBlobString(image,
      "<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.1//EN\"");
    (void) WriteBlobString(image,
      " \"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd\">\n");
    (void) FormatLocaleString(message,MagickPathExtent,
      "<svg version=\"1.1\" id=\"Layer_1\" "
      "xmlns=\"http://www.w3.org/2000/svg\" "
      "xmlns:xlink=\"http://www.w3.org/1999/xlink\" x=\"0px\" y=\"0px\" "
      "width=\"%.20gpx\" height=\"%.20gpx\" viewBox=\"0 0 %.20g %.20g\" "
      "enable-background=\"new 0 0 %.20g %.20g\" xml:space=\"preserve\">",
      (double) image->columns,(double) image->rows,
      (double) image->columns,(double) image->rows,
      (double) image->columns,(double) image->rows);
    (void) WriteBlobString(image,message);
    clone_image=CloneImage(image,0,0,MagickTrue,exception);
    if (clone_image == (Image *) NULL)
      return(MagickFalse);
    /* Encode the clone as PNG in memory, then base64 the blob. */
    image_info=AcquireImageInfo();
    (void) CopyMagickString(image_info->magick,"PNG",MagickPathExtent);
    blob_length=2048;
    blob=(unsigned char *) ImageToBlob(image_info,clone_image,&blob_length,
      exception);
    clone_image=DestroyImage(clone_image);
    image_info=DestroyImageInfo(image_info);
    if (blob == (unsigned char *) NULL)
      return(MagickFalse);
    encode_length=0;
    base64=Base64Encode(blob,blob_length,&encode_length);
    blob=(unsigned char *) RelinquishMagickMemory(blob);
    (void) FormatLocaleString(message,MagickPathExtent,
      " <image id=\"image%.20g\" width=\"%.20g\" height=\"%.20g\" "
      "x=\"%.20g\" y=\"%.20g\"\n href=\"data:image/png;base64,",
      (double) image->scene,(double) image->columns,(double) image->rows,
      (double) image->page.x,(double) image->page.y);
    (void) WriteBlobString(image,message);
    /* Emit the base64 payload in 76-character lines (no trailing newline). */
    p=base64;
    for (i=(ssize_t) encode_length; i > 0; i-=76)
    {
      (void) FormatLocaleString(message,MagickPathExtent,"%.76s",p);
      (void) WriteBlobString(image,message);
      p+=76;
      if (i > 76)
        (void) WriteBlobString(image,"\n");
    }
    base64=DestroyString(base64);
    (void) WriteBlobString(image,"\" />\n");
    (void) WriteBlobString(image,"</svg>\n");
  }
#endif
  (void) CloseBlob(image);
  return(MagickTrue);
}

/*
  WriteSVGImage() writes an image in SVG format.  If the image carries an
  "SVG" artifact, that text is written verbatim; if it carries an
  "mvg:vector-graphics" artifact, the MVG primitives are translated to SVG
  below; otherwise the raster is handed to TraceSVGImage().
*/
static MagickBooleanType WriteSVGImage(const ImageInfo *image_info,Image *image,
  ExceptionInfo *exception)
{
#define BezierQuantum  200

  AffineMatrix
    affine;

  char
    keyword[MagickPathExtent],
    message[MagickPathExtent],
    name[MagickPathExtent],
    *next_token,
    *token,
    type[MagickPathExtent];

  const char
    *p,
    *q,
    *value;

  int
    n;

  ssize_t
    j;

  MagickBooleanType
    active,
    status;

  PointInfo
    point;

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register ssize_t
    x;

  register ssize_t
    i;

  size_t
    extent,
    length,
    number_points;

  SVGInfo
    svg_info;

  /*
    Open output image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* A pre-rendered "SVG" artifact short-circuits everything else. */
  value=GetImageArtifact(image,"SVG");
  if (value != (char *) NULL)
    {
      (void) WriteBlobString(image,value);
      (void) CloseBlob(image);
      return(MagickTrue);
    }
  value=GetImageArtifact(image,"mvg:vector-graphics");
  if (value == (char *) NULL)
    return(TraceSVGImage(image,exception));
  /*
    Write SVG header.
*/ (void) WriteBlobString(image,"<?xml version=\"1.0\" standalone=\"no\"?>\n"); (void) WriteBlobString(image, "<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 20010904//EN\"\n"); (void) WriteBlobString(image, " \"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd\">\n"); (void) FormatLocaleString(message,MagickPathExtent, "<svg width=\"%.20g\" height=\"%.20g\">\n",(double) image->columns,(double) image->rows); (void) WriteBlobString(image,message); /* Allocate primitive info memory. */ number_points=2047; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory(number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); GetAffineMatrix(&affine); token=AcquireString(value); extent=strlen(token)+MagickPathExtent; active=MagickFalse; n=0; status=MagickTrue; for (q=(const char *) value; *q != '\0'; ) { /* Interpret graphic primitive. */ (void) GetNextToken(q,&q,MagickPathExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. 
*/ if (active != MagickFalse) { AffineToTransform(image,&affine); active=MagickFalse; } (void) WriteBlobString(image,"<desc>"); (void) WriteBlobString(image,keyword+1); for ( ; (*q != '\n') && (*q != '\0'); q++) switch (*q) { case '<': (void) WriteBlobString(image,"&lt;"); break; case '>': (void) WriteBlobString(image,"&gt;"); break; case '&': (void) WriteBlobString(image,"&amp;"); break; default: (void) WriteBlobByte(image,(unsigned char) *q); break; } (void) WriteBlobString(image,"</desc>\n"); continue; } primitive_type=UndefinedPrimitive; switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("angle",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); affine.ry=StringToDouble(token,&next_token); break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } status=MagickFalse; break; } case 'c': case 'C': { if 
(LocaleCompare("clip-path",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "clip-path:url(#%s);",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("clip-rule",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent,"clip-rule:%s;", token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("clip-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "clipPathUnits=%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "text-decoration:%s;",token); (void) WriteBlobString(image,message); break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent,"fill:%s;", token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("fill-rule",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "fill-rule:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("fill-opacity",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "fill-opacity:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("font-family",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) 
FormatLocaleString(message,MagickPathExtent, "font-family:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("font-stretch",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "font-stretch:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("font-style",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "font-style:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("font-size",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "font-size:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("font-weight",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "font-weight:%s;",token); (void) WriteBlobString(image,message); break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("text-align",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "text-align %s ",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("text-anchor",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "text-anchor %s ",token); (void) WriteBlobString(image,message); break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); primitive_type=ImagePrimitive; break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent,"kerning:%s;", token); (void) WriteBlobString(image,message); } break; } case 'l': 
case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "letter-spacing:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("opacity",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent,"opacity %s ", token); (void) WriteBlobString(image,message); break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) { (void) WriteBlobString(image,"</clipPath>\n"); break; } if (LocaleCompare("defs",token) == 0) { (void) WriteBlobString(image,"</defs>\n"); break; } if (LocaleCompare("gradient",token) == 0) { (void) FormatLocaleString(message,MagickPathExtent, "</%sGradient>\n",type); (void) WriteBlobString(image,message); break; } if (LocaleCompare("graphic-context",token) == 0) { n--; if (n < 0) ThrowWriterException(DrawError, "UnbalancedGraphicContextPushPop"); (void) WriteBlobString(image,"</g>\n"); } if (LocaleCompare("pattern",token) == 0) { (void) WriteBlobString(image,"</pattern>\n"); break; } if (LocaleCompare("symbol",token) == 0) { (void) WriteBlobString(image,"</symbol>\n"); break; } if ((LocaleCompare("defs",token) == 0) || (LocaleCompare("symbol",token) == 0)) (void) WriteBlobString(image,"</g>\n"); break; } if (LocaleCompare("push",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if 
(LocaleCompare("clip-path",token) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "<clipPath id=\"%s\">\n",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("defs",token) == 0) { (void) WriteBlobString(image,"<defs>\n"); break; } if (LocaleCompare("gradient",token) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); svg_info.segment.x1=StringToDouble(token,&next_token); svg_info.element.cx=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); svg_info.segment.y1=StringToDouble(token,&next_token); svg_info.element.cy=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); svg_info.segment.x2=StringToDouble(token,&next_token); svg_info.element.major=StringToDouble(token, (char **) NULL); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); svg_info.segment.y2=StringToDouble(token,&next_token); svg_info.element.minor=StringToDouble(token, (char **) NULL); (void) FormatLocaleString(message,MagickPathExtent, "<%sGradient id=\"%s\" x1=\"%g\" y1=\"%g\" x2=\"%g\" " "y2=\"%g\">\n",type,name,svg_info.segment.x1, svg_info.segment.y1,svg_info.segment.x2,svg_info.segment.y2); if (LocaleCompare(type,"radial") == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); svg_info.element.angle=StringToDouble(token, (char **) NULL); (void) FormatLocaleString(message,MagickPathExtent, "<%sGradient id=\"%s\" cx=\"%g\" cy=\"%g\" r=\"%g\" " "fx=\"%g\" fy=\"%g\">\n",type,name, svg_info.element.cx,svg_info.element.cy, svg_info.element.angle,svg_info.element.major, 
svg_info.element.minor); } (void) WriteBlobString(image,message); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; if (active) { AffineToTransform(image,&affine); active=MagickFalse; } (void) WriteBlobString(image,"<g style=\""); active=MagickTrue; } if (LocaleCompare("pattern",token) == 0) { (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); svg_info.bounds.x=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); svg_info.bounds.y=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); svg_info.bounds.width=StringToDouble(token, (char **) NULL); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); svg_info.bounds.height=StringToDouble(token,(char **) NULL); (void) FormatLocaleString(message,MagickPathExtent, "<pattern id=\"%s\" x=\"%g\" y=\"%g\" width=\"%g\" " "height=\"%g\">\n",name,svg_info.bounds.x,svg_info.bounds.y, svg_info.bounds.width,svg_info.bounds.height); (void) WriteBlobString(image,message); break; } if (LocaleCompare("symbol",token) == 0) { (void) WriteBlobString(image,"<symbol>\n"); break; } break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent,"rotate(%s) ", token); (void) WriteBlobString(image,message); break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); (void) 
GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); break; } if (LocaleCompare("skewX",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent,"skewX(%s) ", token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("skewY",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent,"skewY(%s) ", token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("stop-color",keyword) == 0) { char color[MagickPathExtent]; (void) GetNextToken(q,&q,extent,token); (void) CopyMagickString(color,token,MagickPathExtent); (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, " <stop offset=\"%s\" stop-color=\"%s\" />\n",token,color); (void) WriteBlobString(image,message); break; } if (LocaleCompare("stroke",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent,"stroke:%s;", token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "stroke-antialias:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (IsPoint(q)) { ssize_t k; p=q; (void) GetNextToken(p,&p,extent,token); for (k=0; IsPoint(token); k++) (void) GetNextToken(p,&p,extent,token); (void) WriteBlobString(image,"stroke-dasharray:"); for (j=0; j < k; j++) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent,"%s ", token); (void) WriteBlobString(image,message); } (void) WriteBlobString(image,";"); break; } (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "stroke-dasharray:%s;",token); (void) WriteBlobString(image,message); break; } if 
(LocaleCompare("stroke-dashoffset",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "stroke-dashoffset:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "stroke-linecap:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "stroke-linejoin:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "stroke-miterlimit:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "stroke-opacity:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("stroke-width",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "stroke-width:%s;",token); (void) WriteBlobString(image,message); continue; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; break; } if (LocaleCompare("text-antialias",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, "text-antialias:%s;",token); (void) WriteBlobString(image,message); break; } if (LocaleCompare("tspan",keyword) == 0) { primitive_type=TextPrimitive; break; } if (LocaleCompare("translate",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) 
GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); (void) GetNextToken(q,&q,extent,token); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if (primitive_type == UndefinedPrimitive) continue; /* Parse the primitive attributes. */ i=0; j=0; for (x=0; *q != '\0'; x++) { /* Define points. */ if (IsPoint(q) == MagickFalse) break; (void) GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); (void) GetNextToken(q,&q,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); (void) GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') (void) GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; i++; if (i < (ssize_t) (number_points-6*BezierQuantum-360)) continue; number_points+=6*BezierQuantum+360; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, number_points,sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); break; } } primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].text=(char *) NULL; if (active) { AffineToTransform(image,&affine); active=MagickFalse; } active=MagickFalse; switch (primitive_type) { 
case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) FormatLocaleString(message,MagickPathExtent, " <line x1=\"%g\" y1=\"%g\" x2=\"%g\" y2=\"%g\"/>\n", primitive_info[j].point.x,primitive_info[j].point.y, primitive_info[j+1].point.x,primitive_info[j+1].point.y); (void) WriteBlobString(image,message); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) FormatLocaleString(message,MagickPathExtent, " <rect x=\"%g\" y=\"%g\" width=\"%g\" height=\"%g\"/>\n", primitive_info[j].point.x,primitive_info[j].point.y, primitive_info[j+1].point.x-primitive_info[j].point.x, primitive_info[j+1].point.y-primitive_info[j].point.y); (void) WriteBlobString(image,message); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } (void) FormatLocaleString(message,MagickPathExtent, " <rect x=\"%g\" y=\"%g\" width=\"%g\" height=\"%g\" rx=\"%g\" " "ry=\"%g\"/>\n",primitive_info[j].point.x, primitive_info[j].point.y,primitive_info[j+1].point.x- primitive_info[j].point.x,primitive_info[j+1].point.y- primitive_info[j].point.y,primitive_info[j+2].point.x, primitive_info[j+2].point.y); (void) WriteBlobString(image,message); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } (void) FormatLocaleString(message,MagickPathExtent, " <ellipse cx=\"%g\" cy=\"%g\" rx=\"%g\" ry=\"%g\"/>\n", primitive_info[j].point.x,primitive_info[j].point.y, primitive_info[j+1].point.x,primitive_info[j+1].point.y); (void) WriteBlobString(image,message); break; } case CirclePrimitive: { double alpha, beta; if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } 
alpha=primitive_info[j+1].point.x-primitive_info[j].point.x; beta=primitive_info[j+1].point.y-primitive_info[j].point.y; (void) FormatLocaleString(message,MagickPathExtent, " <circle cx=\"%g\" cy=\"%g\" r=\"%g\"/>\n", primitive_info[j].point.x,primitive_info[j].point.y, hypot(alpha,beta)); (void) WriteBlobString(image,message); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 2) { status=MagickFalse; break; } (void) CopyMagickString(message," <polyline points=\"", MagickPathExtent); (void) WriteBlobString(image,message); length=strlen(message); for ( ; j < i; j++) { (void) FormatLocaleString(message,MagickPathExtent,"%g,%g ", primitive_info[j].point.x,primitive_info[j].point.y); length+=strlen(message); if (length >= 80) { (void) WriteBlobString(image,"\n "); length=strlen(message)+5; } (void) WriteBlobString(image,message); } (void) WriteBlobString(image,"\"/>\n"); break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; i++; (void) CopyMagickString(message," <polygon points=\"", MagickPathExtent); (void) WriteBlobString(image,message); length=strlen(message); for ( ; j < i; j++) { (void) FormatLocaleString(message,MagickPathExtent,"%g,%g ", primitive_info[j].point.x,primitive_info[j].point.y); length+=strlen(message); if (length >= 80) { (void) WriteBlobString(image,"\n "); length=strlen(message)+5; } (void) WriteBlobString(image,message); } (void) WriteBlobString(image,"\"/>\n"); break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } break; } case PathPrimitive: { int number_attributes; (void) GetNextToken(q,&q,extent,token); number_attributes=1; for (p=token; *p != '\0'; p++) if (isalpha((int) ((unsigned char) *p)) != 0) number_attributes++; if (i > (ssize_t) (number_points-6*BezierQuantum*number_attributes-1)) { 
number_points+=6*BezierQuantum*number_attributes; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, number_points,sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } } (void) WriteBlobString(image," <path d=\""); (void) WriteBlobString(image,token); (void) WriteBlobString(image,"\"/>\n"); break; } case AlphaPrimitive: case ColorPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); if (LocaleCompare("point",token) == 0) primitive_info[j].method=PointMethod; if (LocaleCompare("replace",token) == 0) primitive_info[j].method=ReplaceMethod; if (LocaleCompare("floodfill",token) == 0) primitive_info[j].method=FloodfillMethod; if (LocaleCompare("filltoborder",token) == 0) primitive_info[j].method=FillToBorderMethod; if (LocaleCompare("reset",token) == 0) primitive_info[j].method=ResetMethod; break; } case TextPrimitive: { register char *p; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, " <text x=\"%g\" y=\"%g\">",primitive_info[j].point.x, primitive_info[j].point.y); (void) WriteBlobString(image,message); for (p=token; *p != '\0'; p++) switch (*p) { case '<': (void) WriteBlobString(image,"&lt;"); break; case '>': (void) WriteBlobString(image,"&gt;"); break; case '&': (void) WriteBlobString(image,"&amp;"); break; default: (void) WriteBlobByte(image,(unsigned char) *p); break; } (void) WriteBlobString(image,"</text>\n"); break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } (void) GetNextToken(q,&q,extent,token); (void) FormatLocaleString(message,MagickPathExtent, " <image x=\"%g\" y=\"%g\" width=\"%g\" height=\"%g\" " "href=\"%s\"/>\n",primitive_info[j].point.x, 
primitive_info[j].point.y,primitive_info[j+1].point.x, primitive_info[j+1].point.y,token); (void) WriteBlobString(image,message); break; } } if (primitive_info == (PrimitiveInfo *) NULL) break; primitive_info[i].primitive=UndefinedPrimitive; if (status == MagickFalse) break; } (void) WriteBlobString(image,"</svg>\n"); /* Relinquish resources. */ token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); (void) CloseBlob(image); return(status); }
./CrossVul/dataset_final_sorted/CWE-20/c/good_1221_0
crossvul-cpp_data_bad_5845_7
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Darryl Miles G7LED (dlm@g7led.demon.co.uk)
 * Copyright (C) Steven Whitehouse GW7RRM (stevew@acm.org)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de)
 * Copyright (C) Hans Alblas PE1AYX (hans@esrac.ele.tue.nl)
 * Copyright (C) Frederic Rible F1OAT (frible@teaser.fr)
 */
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/netfilter.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/tcp_states.h>
#include <net/ip.h>
#include <net/arp.h>

/*
 * Global list of every bound AX.25 control block, protected by
 * ax25_list_lock.  Readers and writers below take the lock with the
 * _bh variants when they may run concurrently with softirq context.
 */
HLIST_HEAD(ax25_list);
DEFINE_SPINLOCK(ax25_list_lock);

/* Defined later in this file; forward-declared for use in socket setup. */
static const struct proto_ops ax25_proto_ops;

/*
 * sk->sk_destruct callback: release the ax25_cb reference that the
 * socket holds (taken when the cb was attached to the sock).
 */
static void ax25_free_sock(struct sock *sk)
{
	ax25_cb_put(ax25_sk(sk));
}

/*
 *	Socket removal during an interrupt is now safe.
 */
static void ax25_cb_del(ax25_cb *ax25)
{
	/*
	 * Unlink the control block from ax25_list and drop the list's
	 * reference.  A cb that was never hashed is left untouched.
	 */
	if (!hlist_unhashed(&ax25->ax25_node)) {
		spin_lock_bh(&ax25_list_lock);
		hlist_del_init(&ax25->ax25_node);
		spin_unlock_bh(&ax25_list_lock);
		ax25_cb_put(ax25);
	}
}

/*
 *	Kill all bound sockets on a dropped device.
 */
static void ax25_kill_by_device(struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *s;

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		return;

	/*
	 * NOTE(review): s->ax25_dev is cleared here without holding the
	 * socket lock, while other paths (e.g. ax25_send_to_raw() below)
	 * dereference s->ax25_dev->dev.  Verify against later upstream
	 * fixes for this device-teardown race before relying on it.
	 */
	spin_lock_bh(&ax25_list_lock);
again:
	ax25_for_each(s, &ax25_list) {
		if (s->ax25_dev == ax25_dev) {
			s->ax25_dev = NULL;
			spin_unlock_bh(&ax25_list_lock);
			ax25_disconnect(s, ENETUNREACH);
			spin_lock_bh(&ax25_list_lock);

			/* The entry could have been deleted from the
			 * list meanwhile and thus the next pointer is
			 * no longer valid.  Play it safe and restart
			 * the scan.  Forward progress is ensured
			 * because we set s->ax25_dev to NULL and we
			 * are never passed a NULL 'dev' argument.
			 */
			goto again;
		}
	}
	spin_unlock_bh(&ax25_list_lock);
}

/*
 *	Handle device status changes.  Registered as a netdevice notifier;
 *	only AX.25 interfaces in the initial network namespace are handled.
 */
static int ax25_device_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	/* Reject non AX.25 devices */
	if (dev->type != ARPHRD_AX25)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		ax25_dev_device_up(dev);
		break;
	case NETDEV_DOWN:
		/* Disconnect every socket bound to the device, then tear
		 * down routes and the device bookkeeping, in that order. */
		ax25_kill_by_device(dev);
		ax25_rt_device_down(dev);
		ax25_dev_device_down(dev);
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

/*
 *	Add a socket to the bound sockets list.  Takes an extra reference
 *	on the cb which is dropped by ax25_cb_del().
 */
void ax25_cb_add(ax25_cb *ax25)
{
	spin_lock_bh(&ax25_list_lock);
	ax25_cb_hold(ax25);
	hlist_add_head(&ax25->ax25_node, &ax25_list);
	spin_unlock_bh(&ax25_list_lock);
}

/*
 *	Find a socket that wants to accept the SABM we have just
 *	received.
 */
struct sock *ax25_find_listener(ax25_address *addr, int digi,
	struct net_device *dev, int type)
{
	ax25_cb *s;

	/*
	 * Walk the global cb list looking for a listening socket of the
	 * requested type bound to 'addr' (and matching the digipeater
	 * flag).  On success the sock is returned with a reference held
	 * (sock_hold); the caller must sock_put() it.
	 */
	spin_lock(&ax25_list_lock);
	ax25_for_each(s, &ax25_list) {
		/* Listener and frame must agree on digipeater usage. */
		if ((s->iamdigi && !digi) || (!s->iamdigi && digi))
			continue;
		if (s->sk && !ax25cmp(&s->source_addr, addr) &&
		    s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) {
			/* If device is null we match any device */
			if (s->ax25_dev == NULL || s->ax25_dev->dev == dev) {
				sock_hold(s->sk);
				spin_unlock(&ax25_list_lock);
				return s->sk;
			}
		}
	}
	spin_unlock(&ax25_list_lock);
	return NULL;
}

/*
 *	Find an AX.25 socket given both ends.  Returns the sock with a
 *	reference held, or NULL if no match; caller must sock_put().
 */
struct sock *ax25_get_socket(ax25_address *my_addr, ax25_address *dest_addr,
	int type)
{
	struct sock *sk = NULL;
	ax25_cb *s;

	spin_lock(&ax25_list_lock);
	ax25_for_each(s, &ax25_list) {
		if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
		    !ax25cmp(&s->dest_addr, dest_addr) &&
		    s->sk->sk_type == type) {
			sk = s->sk;
			sock_hold(sk);
			break;
		}
	}

	spin_unlock(&ax25_list_lock);

	return sk;
}

/*
 *	Find an AX.25 control block given both ends. It will only pick up
 *	floating AX.25 control blocks or non Raw socket bound control blocks.
 */
ax25_cb *ax25_find_cb(ax25_address *src_addr, ax25_address *dest_addr,
	ax25_digi *digi, struct net_device *dev)
{
	ax25_cb *s;

	/*
	 * Match on source, destination, device and (when supplied) the
	 * exact digipeater path.  Returns the cb with a reference held
	 * (ax25_cb_hold); caller must ax25_cb_put().
	 */
	spin_lock_bh(&ax25_list_lock);
	ax25_for_each(s, &ax25_list) {
		if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
			continue;
		if (s->ax25_dev == NULL)
			continue;
		if (ax25cmp(&s->source_addr, src_addr) == 0 &&
		    ax25cmp(&s->dest_addr, dest_addr) == 0 &&
		    s->ax25_dev->dev == dev) {
			if (digi != NULL && digi->ndigi != 0) {
				/* Caller wants a specific path: both the
				 * presence and the contents must match. */
				if (s->digipeat == NULL)
					continue;
				if (ax25digicmp(s->digipeat, digi) != 0)
					continue;
			} else {
				/* Caller wants no digipeaters: reject cbs
				 * that have a non-empty path. */
				if (s->digipeat != NULL && s->digipeat->ndigi != 0)
					continue;
			}
			ax25_cb_hold(s);
			spin_unlock_bh(&ax25_list_lock);

			return s;
		}
	}
	spin_unlock_bh(&ax25_list_lock);

	return NULL;
}

EXPORT_SYMBOL(ax25_find_cb);

/*
 *	Deliver a copy of 'skb' to every raw AX.25 socket bound to 'addr'
 *	for protocol 'proto' on the skb's device.
 */
void ax25_send_to_raw(ax25_address *addr, struct sk_buff *skb, int proto)
{
	ax25_cb *s;
	struct sk_buff *copy;

	spin_lock(&ax25_list_lock);
	ax25_for_each(s, &ax25_list) {
		/*
		 * NOTE(review): s->ax25_dev is dereferenced here without a
		 * NULL check, but ax25_kill_by_device() above can set it to
		 * NULL concurrently — confirm against upstream fixes.
		 */
		if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
		    s->sk->sk_type == SOCK_RAW &&
		    s->sk->sk_protocol == proto &&
		    s->ax25_dev->dev == skb->dev &&
		    atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) {
			if ((copy = skb_clone(skb, GFP_ATOMIC)) == NULL)
				continue;
			if (sock_queue_rcv_skb(s->sk, copy) != 0)
				kfree_skb(copy);
		}
	}
	spin_unlock(&ax25_list_lock);
}

/*
 *	Deferred destroy.
 */
void ax25_destroy_socket(ax25_cb *);

/*
 *	Handler for deferred kills: retry ax25_destroy_socket() once the
 *	two-second grace timer armed in it expires.
 */
static void ax25_destroy_timer(unsigned long data)
{
	ax25_cb *ax25=(ax25_cb *)data;
	struct sock *sk;

	sk=ax25->sk;

	bh_lock_sock(sk);
	sock_hold(sk);
	ax25_destroy_socket(ax25);
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	This is called from user mode and the timers. Thus it protects itself
 *	against interrupt users but doesn't worry about being called during
 *	work. Once it is removed from the queue no interrupt or bottom half
 *	will touch it and we are (fairly 8-) ) safe.
 */
void ax25_destroy_socket(ax25_cb *ax25)
{
	struct sk_buff *skb;

	/* Take the cb off the global list and stop every timer before
	 * tearing down queues. */
	ax25_cb_del(ax25);

	ax25_stop_heartbeat(ax25);
	ax25_stop_t1timer(ax25);
	ax25_stop_t2timer(ax25);
	ax25_stop_t3timer(ax25);
	ax25_stop_idletimer(ax25);

	ax25_clear_queues(ax25);	/* Flush the queues */

	if (ax25->sk != NULL) {
		while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
			if (skb->sk != ax25->sk) {
				/* A pending connection: the skb carries a
				 * never-accepted child socket. */
				ax25_cb *sax25 = ax25_sk(skb->sk);

				/* Queue the unaccepted socket for death */
				sock_orphan(skb->sk);

				/* 9A4GL: hack to release unaccepted sockets */
				skb->sk->sk_state = TCP_LISTEN;

				ax25_start_heartbeat(sax25);
				sax25->state = AX25_STATE_0;
			}

			kfree_skb(skb);
		}
		skb_queue_purge(&ax25->sk->sk_write_queue);
	}

	if (ax25->sk != NULL) {
		if (sk_has_allocations(ax25->sk)) {
			/* Defer: outstanding buffers.  Re-run this function
			 * from ax25_destroy_timer() in two seconds. */
			setup_timer(&ax25->dtimer, ax25_destroy_timer,
					(unsigned long)ax25);
			ax25->dtimer.expires  = jiffies + 2 * HZ;
			add_timer(&ax25->dtimer);
		} else {
			/* Detach the sock and drop its reference; the cb
			 * reference goes with it via ax25_free_sock(). */
			struct sock *sk=ax25->sk;
			ax25->sk=NULL;
			sock_put(sk);
		}
	} else {
		/* No socket attached: drop the cb directly. */
		ax25_cb_put(ax25);
	}
}

/*
 * dl1bke 960311: set parameters for existing AX.25 connections,
 *		  includes a KILL command to abort any connection.
 *		  VERY useful for debugging ;-)
 */

/*
 * Handle the SIOCAX25CTLCON ioctl: look up the connection described by
 * the user-supplied ax25_ctl_struct and apply the requested command
 * (kill, or set one of the per-connection protocol parameters).
 * Returns 0 on success or a negative errno.
 */
static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg)
{
	struct ax25_ctl_struct ax25_ctl;
	ax25_digi digi;
	ax25_dev *ax25_dev;
	ax25_cb *ax25;
	unsigned int k;
	int ret = 0;

	if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
		return -EFAULT;

	if ((ax25_dev = ax25_addr_ax25dev(&ax25_ctl.port_addr)) == NULL)
		return -ENODEV;

	/* Bound the user-controlled digipeater count before copying. */
	if (ax25_ctl.digi_count > AX25_MAX_DIGIS)
		return -EINVAL;

	/* Reject arguments that would overflow the jiffies conversions
	 * below (AX25_KILL ignores .arg, so it is exempt). */
	if (ax25_ctl.arg > ULONG_MAX / HZ && ax25_ctl.cmd != AX25_KILL)
		return -EINVAL;

	digi.ndigi = ax25_ctl.digi_count;
	for (k = 0; k < digi.ndigi; k++)
		digi.calls[k] = ax25_ctl.digi_addr[k];

	/* Holds a reference on success; released at out_put. */
	if ((ax25 = ax25_find_cb(&ax25_ctl.source_addr, &ax25_ctl.dest_addr, &digi, ax25_dev->dev)) == NULL)
		return -ENOTCONN;

	switch (ax25_ctl.cmd) {
	case AX25_KILL:
		ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
#ifdef CONFIG_AX25_DAMA_SLAVE
		if (ax25_dev->dama.slave && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
			ax25_dama_off(ax25);
#endif
		ax25_disconnect(ax25, ENETRESET);
		break;

	case AX25_WINDOW:
		/* Valid window depends on modulo-8 vs modulo-128 mode. */
		if (ax25->modulus == AX25_MODULUS) {
			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
				goto einval_put;
		} else {
			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
				goto einval_put;
		}
		ax25->window = ax25_ctl.arg;
		break;

	case AX25_T1:
		if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
			goto einval_put;
		/* Initial round-trip estimate is half of T1. */
		ax25->rtt = (ax25_ctl.arg * HZ) / 2;
		ax25->t1 = ax25_ctl.arg * HZ;
		break;

	case AX25_T2:
		if (ax25_ctl.arg < 1 || ax25_ctl.arg > ULONG_MAX / HZ)
			goto einval_put;
		ax25->t2 = ax25_ctl.arg * HZ;
		break;

	case AX25_N2:
		if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
			goto einval_put;
		ax25->n2count = 0;
		ax25->n2 = ax25_ctl.arg;
		break;

	case AX25_T3:
		if (ax25_ctl.arg > ULONG_MAX / HZ)
			goto einval_put;
		ax25->t3 = ax25_ctl.arg * HZ;
		break;

	case AX25_IDLE:
		/* Argument is in minutes, hence the 60 * HZ scaling. */
		if (ax25_ctl.arg > ULONG_MAX / (60 * HZ))
			goto einval_put;
		ax25->idle = ax25_ctl.arg * 60 * HZ;
		break;

	case AX25_PACLEN:
		if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
			goto einval_put;
		ax25->paclen = ax25_ctl.arg;
		break;

	default:
		goto einval_put;
	}

out_put:
	ax25_cb_put(ax25);
	return ret;

einval_put:
	ret = -EINVAL;
	goto out_put;
}

/*
 * Copy the per-device protocol parameters into a control block.
 */
static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
{
	ax25->rtt     = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2;
	ax25->t1      = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]);
	ax25->t2      = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T2]);
	ax25->t3      = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T3]);
	ax25->n2      = ax25_dev->values[AX25_VALUES_N2];
	ax25->paclen  = ax25_dev->values[AX25_VALUES_PACLEN];
	ax25->idle    = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_IDLE]);
	ax25->backoff = ax25_dev->values[AX25_VALUES_BACKOFF];

	if (ax25_dev->values[AX25_VALUES_AXDEFMODE]) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}
}

/*
 * Fill in a created AX.25 created control block with the default
 * values for a particular device.  With a NULL device the kernel /
 * AX.25 spec defaults are used instead.
 */
void ax25_fillin_cb(ax25_cb *ax25, ax25_dev *ax25_dev)
{
	ax25->ax25_dev = ax25_dev;

	if (ax25->ax25_dev != NULL) {
		ax25_fillin_cb_from_dev(ax25, ax25_dev);
		return;
	}

	/*
	 * No device, use kernel / AX.25 spec default values
	 */
	ax25->rtt     = msecs_to_jiffies(AX25_DEF_T1) / 2;
	ax25->t1      = msecs_to_jiffies(AX25_DEF_T1);
	ax25->t2      = msecs_to_jiffies(AX25_DEF_T2);
	ax25->t3      = msecs_to_jiffies(AX25_DEF_T3);
	ax25->n2      = AX25_DEF_N2;
	ax25->paclen  = AX25_DEF_PACLEN;
	ax25->idle    = msecs_to_jiffies(AX25_DEF_IDLE);
	ax25->backoff = AX25_DEF_BACKOFF;

	if (AX25_DEF_AXDEFMODE) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = AX25_DEF_EWINDOW;
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = AX25_DEF_WINDOW;
	}
}

/*
 *	Create an empty AX.25 control block.
*/ ax25_cb *ax25_create_cb(void) { ax25_cb *ax25; if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) return NULL; atomic_set(&ax25->refcount, 1); skb_queue_head_init(&ax25->write_queue); skb_queue_head_init(&ax25->frag_queue); skb_queue_head_init(&ax25->ack_queue); skb_queue_head_init(&ax25->reseq_queue); ax25_setup_timers(ax25); ax25_fillin_cb(ax25, NULL); ax25->state = AX25_STATE_0; return ax25; } /* * Handling for system calls applied via the various interfaces to an * AX25 socket object */ static int ax25_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; ax25_cb *ax25; struct net_device *dev; char devname[IFNAMSIZ]; unsigned long opt; int res = 0; if (level != SOL_AX25) return -ENOPROTOOPT; if (optlen < sizeof(unsigned int)) return -EINVAL; if (get_user(opt, (unsigned int __user *)optval)) return -EFAULT; lock_sock(sk); ax25 = ax25_sk(sk); switch (optname) { case AX25_WINDOW: if (ax25->modulus == AX25_MODULUS) { if (opt < 1 || opt > 7) { res = -EINVAL; break; } } else { if (opt < 1 || opt > 63) { res = -EINVAL; break; } } ax25->window = opt; break; case AX25_T1: if (opt < 1 || opt > ULONG_MAX / HZ) { res = -EINVAL; break; } ax25->rtt = (opt * HZ) >> 1; ax25->t1 = opt * HZ; break; case AX25_T2: if (opt < 1 || opt > ULONG_MAX / HZ) { res = -EINVAL; break; } ax25->t2 = opt * HZ; break; case AX25_N2: if (opt < 1 || opt > 31) { res = -EINVAL; break; } ax25->n2 = opt; break; case AX25_T3: if (opt < 1 || opt > ULONG_MAX / HZ) { res = -EINVAL; break; } ax25->t3 = opt * HZ; break; case AX25_IDLE: if (opt > ULONG_MAX / (60 * HZ)) { res = -EINVAL; break; } ax25->idle = opt * 60 * HZ; break; case AX25_BACKOFF: if (opt > 2) { res = -EINVAL; break; } ax25->backoff = opt; break; case AX25_EXTSEQ: ax25->modulus = opt ? AX25_EMODULUS : AX25_MODULUS; break; case AX25_PIDINCL: ax25->pidincl = opt ? 1 : 0; break; case AX25_IAMDIGI: ax25->iamdigi = opt ? 
1 : 0; break; case AX25_PACLEN: if (opt < 16 || opt > 65535) { res = -EINVAL; break; } ax25->paclen = opt; break; case SO_BINDTODEVICE: if (optlen > IFNAMSIZ) optlen = IFNAMSIZ; if (copy_from_user(devname, optval, optlen)) { res = -EFAULT; break; } if (sk->sk_type == SOCK_SEQPACKET && (sock->state != SS_UNCONNECTED || sk->sk_state == TCP_LISTEN)) { res = -EADDRNOTAVAIL; break; } dev = dev_get_by_name(&init_net, devname); if (!dev) { res = -ENODEV; break; } ax25->ax25_dev = ax25_dev_ax25dev(dev); ax25_fillin_cb(ax25, ax25->ax25_dev); dev_put(dev); break; default: res = -ENOPROTOOPT; } release_sock(sk); return res; } static int ax25_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; ax25_cb *ax25; struct ax25_dev *ax25_dev; char devname[IFNAMSIZ]; void *valptr; int val = 0; int maxlen, length; if (level != SOL_AX25) return -ENOPROTOOPT; if (get_user(maxlen, optlen)) return -EFAULT; if (maxlen < 1) return -EFAULT; valptr = (void *) &val; length = min_t(unsigned int, maxlen, sizeof(int)); lock_sock(sk); ax25 = ax25_sk(sk); switch (optname) { case AX25_WINDOW: val = ax25->window; break; case AX25_T1: val = ax25->t1 / HZ; break; case AX25_T2: val = ax25->t2 / HZ; break; case AX25_N2: val = ax25->n2; break; case AX25_T3: val = ax25->t3 / HZ; break; case AX25_IDLE: val = ax25->idle / (60 * HZ); break; case AX25_BACKOFF: val = ax25->backoff; break; case AX25_EXTSEQ: val = (ax25->modulus == AX25_EMODULUS); break; case AX25_PIDINCL: val = ax25->pidincl; break; case AX25_IAMDIGI: val = ax25->iamdigi; break; case AX25_PACLEN: val = ax25->paclen; break; case SO_BINDTODEVICE: ax25_dev = ax25->ax25_dev; if (ax25_dev != NULL && ax25_dev->dev != NULL) { strlcpy(devname, ax25_dev->dev->name, sizeof(devname)); length = strlen(devname) + 1; } else { *devname = '\0'; length = 1; } valptr = (void *) devname; break; default: release_sock(sk); return -ENOPROTOOPT; } release_sock(sk); if (put_user(length, optlen)) 
return -EFAULT; return copy_to_user(optval, valptr, length) ? -EFAULT : 0; } static int ax25_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int res = 0; lock_sock(sk); if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_LISTEN) { sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; goto out; } res = -EOPNOTSUPP; out: release_sock(sk); return res; } /* * XXX: when creating ax25_sock we should update the .obj_size setting * below. */ static struct proto ax25_proto = { .name = "AX25", .owner = THIS_MODULE, .obj_size = sizeof(struct sock), }; static int ax25_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; ax25_cb *ax25; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; switch (sock->type) { case SOCK_DGRAM: if (protocol == 0 || protocol == PF_AX25) protocol = AX25_P_TEXT; break; case SOCK_SEQPACKET: switch (protocol) { case 0: case PF_AX25: /* For CLX */ protocol = AX25_P_TEXT; break; case AX25_P_SEGMENT: #ifdef CONFIG_INET case AX25_P_ARP: case AX25_P_IP: #endif #ifdef CONFIG_NETROM case AX25_P_NETROM: #endif #ifdef CONFIG_ROSE case AX25_P_ROSE: #endif return -ESOCKTNOSUPPORT; #ifdef CONFIG_NETROM_MODULE case AX25_P_NETROM: if (ax25_protocol_is_registered(AX25_P_NETROM)) return -ESOCKTNOSUPPORT; break; #endif #ifdef CONFIG_ROSE_MODULE case AX25_P_ROSE: if (ax25_protocol_is_registered(AX25_P_ROSE)) return -ESOCKTNOSUPPORT; #endif default: break; } break; case SOCK_RAW: break; default: return -ESOCKTNOSUPPORT; } sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto); if (sk == NULL) return -ENOMEM; ax25 = sk->sk_protinfo = ax25_create_cb(); if (!ax25) { sk_free(sk); return -ENOMEM; } sock_init_data(sock, sk); sk->sk_destruct = ax25_free_sock; sock->ops = &ax25_proto_ops; sk->sk_protocol = protocol; ax25->sk = sk; return 0; } struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) { struct sock *sk; ax25_cb *ax25, *oax25; sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, 
osk->sk_prot); if (sk == NULL) return NULL; if ((ax25 = ax25_create_cb()) == NULL) { sk_free(sk); return NULL; } switch (osk->sk_type) { case SOCK_DGRAM: break; case SOCK_SEQPACKET: break; default: sk_free(sk); ax25_cb_put(ax25); return NULL; } sock_init_data(NULL, sk); sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); oax25 = ax25_sk(osk); ax25->modulus = oax25->modulus; ax25->backoff = oax25->backoff; ax25->pidincl = oax25->pidincl; ax25->iamdigi = oax25->iamdigi; ax25->rtt = oax25->rtt; ax25->t1 = oax25->t1; ax25->t2 = oax25->t2; ax25->t3 = oax25->t3; ax25->n2 = oax25->n2; ax25->idle = oax25->idle; ax25->paclen = oax25->paclen; ax25->window = oax25->window; ax25->ax25_dev = ax25_dev; ax25->source_addr = oax25->source_addr; if (oax25->digipeat != NULL) { ax25->digipeat = kmemdup(oax25->digipeat, sizeof(ax25_digi), GFP_ATOMIC); if (ax25->digipeat == NULL) { sk_free(sk); ax25_cb_put(ax25); return NULL; } } sk->sk_protinfo = ax25; sk->sk_destruct = ax25_free_sock; ax25->sk = sk; return sk; } static int ax25_release(struct socket *sock) { struct sock *sk = sock->sk; ax25_cb *ax25; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); ax25 = ax25_sk(sk); if (sk->sk_type == SOCK_SEQPACKET) { switch (ax25->state) { case AX25_STATE_0: release_sock(sk); ax25_disconnect(ax25, 0); lock_sock(sk); ax25_destroy_socket(ax25); break; case AX25_STATE_1: case AX25_STATE_2: ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); release_sock(sk); ax25_disconnect(ax25, 0); lock_sock(sk); ax25_destroy_socket(ax25); break; case AX25_STATE_3: case AX25_STATE_4: ax25_clear_queues(ax25); ax25->n2count = 0; switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: case AX25_PROTO_STD_DUPLEX: ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); 
ax25_stop_t2timer(ax25); ax25_stop_t3timer(ax25); ax25_stop_idletimer(ax25); break; #ifdef CONFIG_AX25_DAMA_SLAVE case AX25_PROTO_DAMA_SLAVE: ax25_stop_t3timer(ax25); ax25_stop_idletimer(ax25); break; #endif } ax25_calculate_t1(ax25); ax25_start_t1timer(ax25); ax25->state = AX25_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } } else { sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); ax25_destroy_socket(ax25); } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } /* * We support a funny extension here so you can (as root) give any callsign * digipeated via a local address as source. This hack is obsolete now * that we've implemented support for SO_BINDTODEVICE. It is however small * and trivially backward compatible. */ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr; ax25_dev *ax25_dev = NULL; ax25_uid_assoc *user; ax25_address call; ax25_cb *ax25; int err = 0; if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) /* support for old structure may go away some time * ax25_bind(): uses old (6 digipeater) socket structure. 
*/ if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) || (addr_len > sizeof(struct full_sockaddr_ax25))) return -EINVAL; if (addr->fsa_ax25.sax25_family != AF_AX25) return -EINVAL; user = ax25_findbyuid(current_euid()); if (user) { call = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) return -EACCES; call = addr->fsa_ax25.sax25_call; } lock_sock(sk); ax25 = ax25_sk(sk); if (!sock_flag(sk, SOCK_ZAPPED)) { err = -EINVAL; goto out; } ax25->source_addr = call; /* * User already set interface with SO_BINDTODEVICE */ if (ax25->ax25_dev != NULL) goto done; if (addr_len > sizeof(struct sockaddr_ax25) && addr->fsa_ax25.sax25_ndigis == 1) { if (ax25cmp(&addr->fsa_digipeater[0], &null_ax25_address) != 0 && (ax25_dev = ax25_addr_ax25dev(&addr->fsa_digipeater[0])) == NULL) { err = -EADDRNOTAVAIL; goto out; } } else { if ((ax25_dev = ax25_addr_ax25dev(&addr->fsa_ax25.sax25_call)) == NULL) { err = -EADDRNOTAVAIL; goto out; } } if (ax25_dev != NULL) ax25_fillin_cb(ax25, ax25_dev); done: ax25_cb_add(ax25); sock_reset_flag(sk, SOCK_ZAPPED); out: release_sock(sk); return err; } /* * FIXME: nonblock behaviour looks like it may have a bug. */ static int __must_check ax25_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; ax25_cb *ax25 = ax25_sk(sk), *ax25t; struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr; ax25_digi *digi = NULL; int ct = 0, err = 0; /* * some sanity checks. code further down depends on this */ if (addr_len == sizeof(struct sockaddr_ax25)) /* support for this will go away in early 2.5.x * ax25_connect(): uses obsolete socket structure */ ; else if (addr_len != sizeof(struct full_sockaddr_ax25)) /* support for old structure may go away some time * ax25_connect(): uses old (6 digipeater) socket structure. 
*/ if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) || (addr_len > sizeof(struct full_sockaddr_ax25))) return -EINVAL; if (fsa->fsa_ax25.sax25_family != AF_AX25) return -EINVAL; lock_sock(sk); /* deal with restarts */ if (sock->state == SS_CONNECTING) { switch (sk->sk_state) { case TCP_SYN_SENT: /* still trying */ err = -EINPROGRESS; goto out_release; case TCP_ESTABLISHED: /* connection established */ sock->state = SS_CONNECTED; goto out_release; case TCP_CLOSE: /* connection refused */ sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } } if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) { err = -EISCONN; /* No reconnect on a seqpacket socket */ goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; kfree(ax25->digipeat); ax25->digipeat = NULL; /* * Handle digi-peaters to be used. */ if (addr_len > sizeof(struct sockaddr_ax25) && fsa->fsa_ax25.sax25_ndigis != 0) { /* Valid number of digipeaters ? */ if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) { err = -EINVAL; goto out_release; } if ((digi = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) { err = -ENOBUFS; goto out_release; } digi->ndigi = fsa->fsa_ax25.sax25_ndigis; digi->lastrepeat = -1; while (ct < fsa->fsa_ax25.sax25_ndigis) { if ((fsa->fsa_digipeater[ct].ax25_call[6] & AX25_HBIT) && ax25->iamdigi) { digi->repeated[ct] = 1; digi->lastrepeat = ct; } else { digi->repeated[ct] = 0; } digi->calls[ct] = fsa->fsa_digipeater[ct]; ct++; } } /* * Must bind first - autobinding in this may or may not work. If * the socket is already bound, check to see if the device has * been filled in, error if it hasn't. */ if (sock_flag(sk, SOCK_ZAPPED)) { /* check if we can remove this feature. It is broken. 
*/ printk(KERN_WARNING "ax25_connect(): %s uses autobind, please contact jreuter@yaina.de\n", current->comm); if ((err = ax25_rt_autobind(ax25, &fsa->fsa_ax25.sax25_call)) < 0) { kfree(digi); goto out_release; } ax25_fillin_cb(ax25, ax25->ax25_dev); ax25_cb_add(ax25); } else { if (ax25->ax25_dev == NULL) { kfree(digi); err = -EHOSTUNREACH; goto out_release; } } if (sk->sk_type == SOCK_SEQPACKET && (ax25t=ax25_find_cb(&ax25->source_addr, &fsa->fsa_ax25.sax25_call, digi, ax25->ax25_dev->dev))) { kfree(digi); err = -EADDRINUSE; /* Already such a connection */ ax25_cb_put(ax25t); goto out_release; } ax25->dest_addr = fsa->fsa_ax25.sax25_call; ax25->digipeat = digi; /* First the easy one */ if (sk->sk_type != SOCK_SEQPACKET) { sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; goto out_release; } /* Move to connecting socket, ax.25 lapb WAIT_UA.. */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: case AX25_PROTO_STD_DUPLEX: ax25_std_establish_data_link(ax25); break; #ifdef CONFIG_AX25_DAMA_SLAVE case AX25_PROTO_DAMA_SLAVE: ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; if (ax25->ax25_dev->dama.slave) ax25_ds_establish_data_link(ax25); else ax25_std_establish_data_link(ax25); break; #endif } ax25->state = AX25_STATE_1; ax25_start_heartbeat(ax25); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { /* Not in ABM, not in WAIT_UA -> failed */ sock->state = SS_UNCONNECTED; err = 
sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; err = 0; out_release: release_sock(sk); return err; } static int ax25_accept(struct socket *sock, struct socket *newsock, int flags) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if (sock->state != SS_UNCONNECTED) return -EINVAL; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out; } /* * The read queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; if (flags & O_NONBLOCK) { err = -EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ kfree_skb(skb); sk->sk_ack_backlog--; newsock->state = SS_CONNECTED; out: release_sock(sk); return err; } static int ax25_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr; struct sock *sk = sock->sk; unsigned char ndigi, i; ax25_cb *ax25; int err = 0; memset(fsa, 0, sizeof(*fsa)); lock_sock(sk); ax25 = ax25_sk(sk); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } fsa->fsa_ax25.sax25_family = AF_AX25; fsa->fsa_ax25.sax25_call = ax25->dest_addr; if (ax25->digipeat != NULL) { ndigi = ax25->digipeat->ndigi; fsa->fsa_ax25.sax25_ndigis = ndigi; for (i = 0; i < ndigi; i++) fsa->fsa_digipeater[i] = ax25->digipeat->calls[i]; } } else { fsa->fsa_ax25.sax25_family = AF_AX25; fsa->fsa_ax25.sax25_call = ax25->source_addr; fsa->fsa_ax25.sax25_ndigis = 1; if (ax25->ax25_dev != 
NULL) { memcpy(&fsa->fsa_digipeater[0], ax25->ax25_dev->dev->dev_addr, AX25_ADDR_LEN); } else { fsa->fsa_digipeater[0] = null_ax25_address; } } *uaddr_len = sizeof (struct full_sockaddr_ax25); out: release_sock(sk); return err; } static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sockaddr_ax25 *usax = (struct sockaddr_ax25 *)msg->msg_name; struct sock *sk = sock->sk; struct sockaddr_ax25 sax; struct sk_buff *skb; ax25_digi dtmp, *dp; ax25_cb *ax25; size_t size; int lv, err, addr_len = msg->msg_namelen; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); ax25 = ax25_sk(sk); if (sock_flag(sk, SOCK_ZAPPED)) { err = -EADDRNOTAVAIL; goto out; } if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); err = -EPIPE; goto out; } if (ax25->ax25_dev == NULL) { err = -ENETUNREACH; goto out; } if (len > ax25->ax25_dev->dev->mtu) { err = -EMSGSIZE; goto out; } if (usax != NULL) { if (usax->sax25_family != AF_AX25) { err = -EINVAL; goto out; } if (addr_len == sizeof(struct sockaddr_ax25)) /* ax25_sendmsg(): uses obsolete socket structure */ ; else if (addr_len != sizeof(struct full_sockaddr_ax25)) /* support for old structure may go away some time * ax25_sendmsg(): uses old (6 digipeater) * socket structure. */ if ((addr_len < sizeof(struct sockaddr_ax25) + sizeof(ax25_address) * 6) || (addr_len > sizeof(struct full_sockaddr_ax25))) { err = -EINVAL; goto out; } if (addr_len > sizeof(struct sockaddr_ax25) && usax->sax25_ndigis != 0) { int ct = 0; struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax; /* Valid number of digipeaters ? 
*/ if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) { err = -EINVAL; goto out; } dtmp.ndigi = usax->sax25_ndigis; while (ct < usax->sax25_ndigis) { dtmp.repeated[ct] = 0; dtmp.calls[ct] = fsa->fsa_digipeater[ct]; ct++; } dtmp.lastrepeat = 0; } sax = *usax; if (sk->sk_type == SOCK_SEQPACKET && ax25cmp(&ax25->dest_addr, &sax.sax25_call)) { err = -EISCONN; goto out; } if (usax->sax25_ndigis == 0) dp = NULL; else dp = &dtmp; } else { /* * FIXME: 1003.1g - if the socket is like this because * it has become closed (not started closed) and is VC * we ought to SIGPIPE, EPIPE */ if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } sax.sax25_family = AF_AX25; sax.sax25_call = ax25->dest_addr; dp = ax25->digipeat; } /* Build a packet */ /* Assume the worst case */ size = len + ax25->ax25_dev->dev->hard_header_len; skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, &err); if (skb == NULL) goto out; skb_reserve(skb, size - len); /* User data follows immediately after the AX.25 data */ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { err = -EFAULT; kfree_skb(skb); goto out; } skb_reset_network_header(skb); /* Add the PID if one is not supplied by the user in the skb */ if (!ax25->pidincl) *skb_push(skb, 1) = sk->sk_protocol; if (sk->sk_type == SOCK_SEQPACKET) { /* Connected mode sockets go via the LAPB machine */ if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); err = -ENOTCONN; goto out; } /* Shove it onto the queue and kick */ ax25_output(ax25, ax25->paclen, skb); err = len; goto out; } skb_push(skb, 1 + ax25_addr_size(dp)); /* Building AX.25 Header */ /* Build an AX.25 header */ lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call, dp, AX25_COMMAND, AX25_MODULUS); skb_set_transport_header(skb, lv); *skb_transport_header(skb) = AX25_UI; /* Datagram frames go straight out of the door as UI */ ax25_queue_xmit(skb, ax25->ax25_dev->dev); err = len; out: release_sock(sk); return err; } static int 
ax25_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int copied; int err = 0; lock_sock(sk); /* * This works for seqpacket too. The receiver has ordered the * queue for us! We do one quick check first though */ if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } /* Now we can treat all alike */ skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (skb == NULL) goto out; if (!ax25_sk(sk)->pidincl) skb_pull(skb, 1); /* Remove PID */ skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (msg->msg_namelen != 0) { struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name; ax25_digi digi; ax25_address src; const unsigned char *mac = skb_mac_header(skb); memset(sax, 0, sizeof(struct full_sockaddr_ax25)); ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, &digi, NULL, NULL); sax->sax25_family = AF_AX25; /* We set this correctly, even though we may not let the application know the digi calls further down (because it did NOT ask to know them). This could get political... 
**/ sax->sax25_ndigis = digi.ndigi; sax->sax25_call = src; if (sax->sax25_ndigis != 0) { int ct; struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)sax; for (ct = 0; ct < digi.ndigi; ct++) fsa->fsa_digipeater[ct] = digi.calls[ct]; } msg->msg_namelen = sizeof(struct full_sockaddr_ax25); } skb_free_datagram(sk, skb); err = copied; out: release_sock(sk); return err; } static int ax25_shutdown(struct socket *sk, int how) { /* FIXME - generate DM and RNR states */ return -EOPNOTSUPP; } static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; void __user *argp = (void __user *)arg; int res = 0; lock_sock(sk); switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; res = put_user(amount, (int __user *)argp); break; } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; res = put_user(amount, (int __user *) argp); break; } case SIOCGSTAMP: res = sock_get_timestamp(sk, argp); break; case SIOCGSTAMPNS: res = sock_get_timestampns(sk, argp); break; case SIOCAX25ADDUID: /* Add a uid to the uid/call map table */ case SIOCAX25DELUID: /* Delete a uid from the uid/call map table */ case SIOCAX25GETUID: { struct sockaddr_ax25 sax25; if (copy_from_user(&sax25, argp, sizeof(sax25))) { res = -EFAULT; break; } res = ax25_uid_ioctl(cmd, &sax25); break; } case SIOCAX25NOUID: { /* Set the default policy (default/bar) */ long amount; if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } if (get_user(amount, (long __user *)argp)) { res = -EFAULT; break; } if (amount < 0 || amount > AX25_NOUID_BLOCK) { res = -EINVAL; break; } ax25_uid_policy = amount; res = 0; break; } case SIOCADDRT: case SIOCDELRT: case SIOCAX25OPTRT: if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } res = ax25_rt_ioctl(cmd, argp); break; case 
SIOCAX25CTLCON: if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } res = ax25_ctl_ioctl(cmd, argp); break; case SIOCAX25GETINFO: case SIOCAX25GETINFOOLD: { ax25_cb *ax25 = ax25_sk(sk); struct ax25_info_struct ax25_info; ax25_info.t1 = ax25->t1 / HZ; ax25_info.t2 = ax25->t2 / HZ; ax25_info.t3 = ax25->t3 / HZ; ax25_info.idle = ax25->idle / (60 * HZ); ax25_info.n2 = ax25->n2; ax25_info.t1timer = ax25_display_timer(&ax25->t1timer) / HZ; ax25_info.t2timer = ax25_display_timer(&ax25->t2timer) / HZ; ax25_info.t3timer = ax25_display_timer(&ax25->t3timer) / HZ; ax25_info.idletimer = ax25_display_timer(&ax25->idletimer) / (60 * HZ); ax25_info.n2count = ax25->n2count; ax25_info.state = ax25->state; ax25_info.rcv_q = sk_rmem_alloc_get(sk); ax25_info.snd_q = sk_wmem_alloc_get(sk); ax25_info.vs = ax25->vs; ax25_info.vr = ax25->vr; ax25_info.va = ax25->va; ax25_info.vs_max = ax25->vs; /* reserved */ ax25_info.paclen = ax25->paclen; ax25_info.window = ax25->window; /* old structure? */ if (cmd == SIOCAX25GETINFOOLD) { static int warned = 0; if (!warned) { printk(KERN_INFO "%s uses old SIOCAX25GETINFO\n", current->comm); warned=1; } if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct_deprecated))) { res = -EFAULT; break; } } else { if (copy_to_user(argp, &ax25_info, sizeof(struct ax25_info_struct))) { res = -EINVAL; break; } } res = 0; break; } case SIOCAX25ADDFWD: case SIOCAX25DELFWD: { struct ax25_fwd_struct ax25_fwd; if (!capable(CAP_NET_ADMIN)) { res = -EPERM; break; } if (copy_from_user(&ax25_fwd, argp, sizeof(ax25_fwd))) { res = -EFAULT; break; } res = ax25_fwd_ioctl(cmd, &ax25_fwd); break; } case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: res = -EINVAL; break; default: res = -ENOIOCTLCMD; break; } release_sock(sk); return res; } #ifdef CONFIG_PROC_FS static void *ax25_info_start(struct seq_file *seq, loff_t 
*pos) __acquires(ax25_list_lock) { spin_lock_bh(&ax25_list_lock); return seq_hlist_start(&ax25_list, *pos); } static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &ax25_list, pos); } static void ax25_info_stop(struct seq_file *seq, void *v) __releases(ax25_list_lock) { spin_unlock_bh(&ax25_list_lock); } static int ax25_info_show(struct seq_file *seq, void *v) { ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node); char buf[11]; int k; /* * New format: * magic dev src_addr dest_addr,digi1,digi2,.. st vs vr va t1 t1 t2 t2 t3 t3 idle idle n2 n2 rtt window paclen Snd-Q Rcv-Q inode */ seq_printf(seq, "%8.8lx %s %s%s ", (long) ax25, ax25->ax25_dev == NULL? "???" : ax25->ax25_dev->dev->name, ax2asc(buf, &ax25->source_addr), ax25->iamdigi? "*":""); seq_printf(seq, "%s", ax2asc(buf, &ax25->dest_addr)); for (k=0; (ax25->digipeat != NULL) && (k < ax25->digipeat->ndigi); k++) { seq_printf(seq, ",%s%s", ax2asc(buf, &ax25->digipeat->calls[k]), ax25->digipeat->repeated[k]? 
"*":""); } seq_printf(seq, " %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %d %d", ax25->state, ax25->vs, ax25->vr, ax25->va, ax25_display_timer(&ax25->t1timer) / HZ, ax25->t1 / HZ, ax25_display_timer(&ax25->t2timer) / HZ, ax25->t2 / HZ, ax25_display_timer(&ax25->t3timer) / HZ, ax25->t3 / HZ, ax25_display_timer(&ax25->idletimer) / (60 * HZ), ax25->idle / (60 * HZ), ax25->n2count, ax25->n2, ax25->rtt / HZ, ax25->window, ax25->paclen); if (ax25->sk != NULL) { seq_printf(seq, " %d %d %lu\n", sk_wmem_alloc_get(ax25->sk), sk_rmem_alloc_get(ax25->sk), sock_i_ino(ax25->sk)); } else { seq_puts(seq, " * * *\n"); } return 0; } static const struct seq_operations ax25_info_seqops = { .start = ax25_info_start, .next = ax25_info_next, .stop = ax25_info_stop, .show = ax25_info_show, }; static int ax25_info_open(struct inode *inode, struct file *file) { return seq_open(file, &ax25_info_seqops); } static const struct file_operations ax25_info_fops = { .owner = THIS_MODULE, .open = ax25_info_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif static const struct net_proto_family ax25_family_ops = { .family = PF_AX25, .create = ax25_create, .owner = THIS_MODULE, }; static const struct proto_ops ax25_proto_ops = { .family = PF_AX25, .owner = THIS_MODULE, .release = ax25_release, .bind = ax25_bind, .connect = ax25_connect, .socketpair = sock_no_socketpair, .accept = ax25_accept, .getname = ax25_getname, .poll = datagram_poll, .ioctl = ax25_ioctl, .listen = ax25_listen, .shutdown = ax25_shutdown, .setsockopt = ax25_setsockopt, .getsockopt = ax25_getsockopt, .sendmsg = ax25_sendmsg, .recvmsg = ax25_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; /* * Called by socket.c on kernel start up */ static struct packet_type ax25_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_AX25), .func = ax25_kiss_rcv, }; static struct notifier_block ax25_dev_notifier = { .notifier_call = ax25_device_event, }; static int __init ax25_init(void) { int 
rc = proto_register(&ax25_proto, 0);

	if (rc != 0)
		goto out;

	/* Register the PF_AX25 address family, start receiving ETH_P_AX25
	 * frames from the device layer, and watch for device up/down
	 * events. */
	sock_register(&ax25_family_ops);
	dev_add_pack(&ax25_packet_type);
	register_netdevice_notifier(&ax25_dev_notifier);

	/* /proc interface: routes, active connections, uid/call map.
	 * NOTE(review): the return values of sock_register() and
	 * proc_create() are not checked here — presumably tolerated at
	 * module init, but worth confirming. */
	proc_create("ax25_route", S_IRUGO, init_net.proc_net,
		    &ax25_route_fops);
	proc_create("ax25", S_IRUGO, init_net.proc_net, &ax25_info_fops);
	proc_create("ax25_calls", S_IRUGO, init_net.proc_net, &ax25_uid_fops);
out:
	return rc;
}
module_init(ax25_init);


MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio AX.25 link layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_AX25);

/* Module unload: tear everything down in the reverse order of the
 * registrations performed in ax25_init(), then free the protocol's
 * route, uid and device tables. */
static void __exit ax25_exit(void)
{
	remove_proc_entry("ax25_route", init_net.proc_net);
	remove_proc_entry("ax25", init_net.proc_net);
	remove_proc_entry("ax25_calls", init_net.proc_net);

	unregister_netdevice_notifier(&ax25_dev_notifier);
	dev_remove_pack(&ax25_packet_type);

	sock_unregister(PF_AX25);
	proto_unregister(&ax25_proto);

	ax25_rt_free();
	ax25_uid_free();
	ax25_dev_free();
}
module_exit(ax25_exit);
/* (dataset artifact, not code) ./CrossVul/dataset_final_sorted/CWE-20/c/bad_5845_7 */
/* (dataset artifact, not code) crossvul-cpp_data_bad_1420_1 */
/* $OpenBSD: tcp_input.c,v 1.359 2018/09/17 14:07:48 friehm Exp $ */ /* $NetBSD: tcp_input.c,v 1.23 1996/02/13 23:43:44 christos Exp $ */ /* * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)COPYRIGHT 1.1 (NRL) 17 January 1995 * * NRL grants permission for redistribution and use in source and binary * forms, with or without modification, of the software and documentation * created at NRL provided that the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgements: * This product includes software developed by the University of * California, Berkeley and its contributors. * This product includes software developed at the Information * Technology Division, US Naval Research Laboratory. * 4. Neither the name of the NRL nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation * are those of the authors and should not be interpreted as representing * official policies, either expressed or implied, of the US Naval * Research Laboratory (NRL). 
*/ #include "pf.h" #include <sys/param.h> #include <sys/systm.h> #include <sys/mbuf.h> #include <sys/protosw.h> #include <sys/socket.h> #include <sys/socketvar.h> #include <sys/timeout.h> #include <sys/kernel.h> #include <sys/pool.h> #include <net/if.h> #include <net/if_var.h> #include <net/route.h> #include <netinet/in.h> #include <netinet/ip.h> #include <netinet/in_pcb.h> #include <netinet/ip_var.h> #include <netinet/tcp.h> #include <netinet/tcp_fsm.h> #include <netinet/tcp_seq.h> #include <netinet/tcp_timer.h> #include <netinet/tcp_var.h> #include <netinet/tcp_debug.h> #if NPF > 0 #include <net/pfvar.h> #endif struct tcpiphdr tcp_saveti; int tcp_mss_adv(struct mbuf *, int); int tcp_flush_queue(struct tcpcb *); #ifdef INET6 #include <netinet6/in6_var.h> #include <netinet6/nd6.h> struct tcpipv6hdr tcp_saveti6; /* for the packet header length in the mbuf */ #define M_PH_LEN(m) (((struct mbuf *)(m))->m_pkthdr.len) #define M_V6_LEN(m) (M_PH_LEN(m) - sizeof(struct ip6_hdr)) #define M_V4_LEN(m) (M_PH_LEN(m) - sizeof(struct ip)) #endif /* INET6 */ int tcprexmtthresh = 3; int tcptv_keep_init = TCPTV_KEEP_INIT; int tcp_rst_ppslim = 100; /* 100pps */ int tcp_rst_ppslim_count = 0; struct timeval tcp_rst_ppslim_last; int tcp_ackdrop_ppslim = 100; /* 100pps */ int tcp_ackdrop_ppslim_count = 0; struct timeval tcp_ackdrop_ppslim_last; #define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ) /* for modulo comparisons of timestamps */ #define TSTMP_LT(a,b) ((int)((a)-(b)) < 0) #define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0) /* for TCP SACK comparisons */ #define SEQ_MIN(a,b) (SEQ_LT(a,b) ? (a) : (b)) #define SEQ_MAX(a,b) (SEQ_GT(a,b) ? (a) : (b)) /* * Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. 
*/ #ifdef INET6 #define ND6_HINT(tp) \ do { \ if (tp && tp->t_inpcb && (tp->t_inpcb->inp_flags & INP_IPV6) && \ rtisvalid(tp->t_inpcb->inp_route6.ro_rt)) { \ nd6_nud_hint(tp->t_inpcb->inp_route6.ro_rt); \ } \ } while (0) #else #define ND6_HINT(tp) #endif #ifdef TCP_ECN /* * ECN (Explicit Congestion Notification) support based on RFC3168 * implementation note: * snd_last is used to track a recovery phase. * when cwnd is reduced, snd_last is set to snd_max. * while snd_last > snd_una, the sender is in a recovery phase and * its cwnd should not be reduced again. * snd_last follows snd_una when not in a recovery phase. */ #endif /* * Macro to compute ACK transmission behavior. Delay the ACK unless * we have already delayed an ACK (must send an ACK every two segments). * We also ACK immediately if we received a PUSH and the ACK-on-PUSH * option is enabled or when the packet is coming from a loopback * interface. */ #define TCP_SETUP_ACK(tp, tiflags, m) \ do { \ struct ifnet *ifp = NULL; \ if (m && (m->m_flags & M_PKTHDR)) \ ifp = if_get(m->m_pkthdr.ph_ifidx); \ if (TCP_TIMER_ISARMED(tp, TCPT_DELACK) || \ (tcp_ack_on_push && (tiflags) & TH_PUSH) || \ (ifp && (ifp->if_flags & IFF_LOOPBACK))) \ tp->t_flags |= TF_ACKNOW; \ else \ TCP_TIMER_ARM_MSEC(tp, TCPT_DELACK, tcp_delack_msecs); \ if_put(ifp); \ } while (0) void tcp_sack_partialack(struct tcpcb *, struct tcphdr *); void tcp_newreno_partialack(struct tcpcb *, struct tcphdr *); void syn_cache_put(struct syn_cache *); void syn_cache_rm(struct syn_cache *); int syn_cache_respond(struct syn_cache *, struct mbuf *); void syn_cache_timer(void *); void syn_cache_reaper(void *); void syn_cache_insert(struct syn_cache *, struct tcpcb *); void syn_cache_reset(struct sockaddr *, struct sockaddr *, struct tcphdr *, u_int); int syn_cache_add(struct sockaddr *, struct sockaddr *, struct tcphdr *, unsigned int, struct socket *, struct mbuf *, u_char *, int, struct tcp_opt_info *, tcp_seq *); struct socket *syn_cache_get(struct 
sockaddr *, struct sockaddr *, struct tcphdr *, unsigned int, unsigned int, struct socket *, struct mbuf *); struct syn_cache *syn_cache_lookup(struct sockaddr *, struct sockaddr *, struct syn_cache_head **, u_int); /* * Insert segment ti into reassembly queue of tcp with * control block tp. Return TH_FIN if reassembly now includes * a segment with FIN. The macro form does the common case inline * (segment is the next to be received on an established connection, * and the queue is empty), avoiding linkage into and removal * from the queue and repetition of various conversions. * Set DELACK for segments received in order, but ack immediately * when segments are out of order (so fast retransmit can work). */ int tcp_reass(struct tcpcb *tp, struct tcphdr *th, struct mbuf *m, int *tlen) { struct tcpqent *p, *q, *nq, *tiqe; /* * Allocate a new queue entry, before we throw away any data. * If we can't, just drop the packet. XXX */ tiqe = pool_get(&tcpqe_pool, PR_NOWAIT); if (tiqe == NULL) { tiqe = TAILQ_LAST(&tp->t_segq, tcpqehead); if (tiqe != NULL && th->th_seq == tp->rcv_nxt) { /* Reuse last entry since new segment fills a hole */ m_freem(tiqe->tcpqe_m); TAILQ_REMOVE(&tp->t_segq, tiqe, tcpqe_q); } if (tiqe == NULL || th->th_seq != tp->rcv_nxt) { /* Flush segment queue for this connection */ tcp_freeq(tp); tcpstat_inc(tcps_rcvmemdrop); m_freem(m); return (0); } } /* * Find a segment which begins after this one does. */ for (p = NULL, q = TAILQ_FIRST(&tp->t_segq); q != NULL; p = q, q = TAILQ_NEXT(q, tcpqe_q)) if (SEQ_GT(q->tcpqe_tcp->th_seq, th->th_seq)) break; /* * If there is a preceding segment, it may provide some of * our data already. If so, drop the data from the incoming * segment. If it provides all of our data, drop us. 
*/ if (p != NULL) { struct tcphdr *phdr = p->tcpqe_tcp; int i; /* conversion to int (in i) handles seq wraparound */ i = phdr->th_seq + phdr->th_reseqlen - th->th_seq; if (i > 0) { if (i >= *tlen) { tcpstat_pkt(tcps_rcvduppack, tcps_rcvdupbyte, *tlen); m_freem(m); pool_put(&tcpqe_pool, tiqe); return (0); } m_adj(m, i); *tlen -= i; th->th_seq += i; } } tcpstat_pkt(tcps_rcvoopack, tcps_rcvoobyte, *tlen); /* * While we overlap succeeding segments trim them or, * if they are completely covered, dequeue them. */ for (; q != NULL; q = nq) { struct tcphdr *qhdr = q->tcpqe_tcp; int i = (th->th_seq + *tlen) - qhdr->th_seq; if (i <= 0) break; if (i < qhdr->th_reseqlen) { qhdr->th_seq += i; qhdr->th_reseqlen -= i; m_adj(q->tcpqe_m, i); break; } nq = TAILQ_NEXT(q, tcpqe_q); m_freem(q->tcpqe_m); TAILQ_REMOVE(&tp->t_segq, q, tcpqe_q); pool_put(&tcpqe_pool, q); } /* Insert the new segment queue entry into place. */ tiqe->tcpqe_m = m; th->th_reseqlen = *tlen; tiqe->tcpqe_tcp = th; if (p == NULL) { TAILQ_INSERT_HEAD(&tp->t_segq, tiqe, tcpqe_q); } else { TAILQ_INSERT_AFTER(&tp->t_segq, p, tiqe, tcpqe_q); } if (th->th_seq != tp->rcv_nxt) return (0); return (tcp_flush_queue(tp)); } int tcp_flush_queue(struct tcpcb *tp) { struct socket *so = tp->t_inpcb->inp_socket; struct tcpqent *q, *nq; int flags; /* * Present data to user, advancing rcv_nxt through * completed sequence space. 
*/ if (TCPS_HAVEESTABLISHED(tp->t_state) == 0) return (0); q = TAILQ_FIRST(&tp->t_segq); if (q == NULL || q->tcpqe_tcp->th_seq != tp->rcv_nxt) return (0); if (tp->t_state == TCPS_SYN_RECEIVED && q->tcpqe_tcp->th_reseqlen) return (0); do { tp->rcv_nxt += q->tcpqe_tcp->th_reseqlen; flags = q->tcpqe_tcp->th_flags & TH_FIN; nq = TAILQ_NEXT(q, tcpqe_q); TAILQ_REMOVE(&tp->t_segq, q, tcpqe_q); ND6_HINT(tp); if (so->so_state & SS_CANTRCVMORE) m_freem(q->tcpqe_m); else sbappendstream(so, &so->so_rcv, q->tcpqe_m); pool_put(&tcpqe_pool, q); q = nq; } while (q != NULL && q->tcpqe_tcp->th_seq == tp->rcv_nxt); tp->t_flags |= TF_BLOCKOUTPUT; sorwakeup(so); tp->t_flags &= ~TF_BLOCKOUTPUT; return (flags); } /* * TCP input routine, follows pages 65-76 of the * protocol specification dated September, 1981 very closely. */ int tcp_input(struct mbuf **mp, int *offp, int proto, int af) { struct mbuf *m = *mp; int iphlen = *offp; struct ip *ip = NULL; struct inpcb *inp = NULL; u_int8_t *optp = NULL; int optlen = 0; int tlen, off; struct tcpcb *otp = NULL, *tp = NULL; int tiflags; struct socket *so = NULL; int todrop, acked, ourfinisacked; int hdroptlen = 0; short ostate; caddr_t saveti; tcp_seq iss, *reuse = NULL; u_long tiwin; struct tcp_opt_info opti; struct tcphdr *th; #ifdef INET6 struct ip6_hdr *ip6 = NULL; #endif /* INET6 */ #ifdef IPSEC struct m_tag *mtag; struct tdb_ident *tdbi; struct tdb *tdb; int error; #endif /* IPSEC */ #ifdef TCP_ECN u_char iptos; #endif tcpstat_inc(tcps_rcvtotal); opti.ts_present = 0; opti.maxseg = 0; /* * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN */ if (m->m_flags & (M_BCAST|M_MCAST)) goto drop; /* * Get IP and TCP header together in first mbuf. * Note: IP leaves IP header in first mbuf. 
*/ IP6_EXTHDR_GET(th, struct tcphdr *, m, iphlen, sizeof(*th)); if (!th) { tcpstat_inc(tcps_rcvshort); return IPPROTO_DONE; } tlen = m->m_pkthdr.len - iphlen; switch (af) { case AF_INET: ip = mtod(m, struct ip *); #ifdef TCP_ECN /* save ip_tos before clearing it for checksum */ iptos = ip->ip_tos; #endif break; #ifdef INET6 case AF_INET6: ip6 = mtod(m, struct ip6_hdr *); #ifdef TCP_ECN iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; #endif /* * Be proactive about unspecified IPv6 address in source. * As we use all-zero to indicate unbounded/unconnected pcb, * unspecified IPv6 address can be used to confuse us. * * Note that packets with unspecified IPv6 destination is * already dropped in ip6_input. */ if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { /* XXX stat */ goto drop; } /* Discard packets to multicast */ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { /* XXX stat */ goto drop; } break; #endif default: unhandled_af(af); } /* * Checksum extended TCP header and data. */ if ((m->m_pkthdr.csum_flags & M_TCP_CSUM_IN_OK) == 0) { int sum; if (m->m_pkthdr.csum_flags & M_TCP_CSUM_IN_BAD) { tcpstat_inc(tcps_rcvbadsum); goto drop; } tcpstat_inc(tcps_inswcsum); switch (af) { case AF_INET: sum = in4_cksum(m, IPPROTO_TCP, iphlen, tlen); break; #ifdef INET6 case AF_INET6: sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr), tlen); break; #endif } if (sum != 0) { tcpstat_inc(tcps_rcvbadsum); goto drop; } } /* * Check that TCP offset makes sense, * pull out TCP options and adjust length. XXX */ off = th->th_off << 2; if (off < sizeof(struct tcphdr) || off > tlen) { tcpstat_inc(tcps_rcvbadoff); goto drop; } tlen -= off; if (off > sizeof(struct tcphdr)) { IP6_EXTHDR_GET(th, struct tcphdr *, m, iphlen, off); if (!th) { tcpstat_inc(tcps_rcvshort); return IPPROTO_DONE; } optlen = off - sizeof(struct tcphdr); optp = (u_int8_t *)(th + 1); /* * Do quick retrieval of timestamp options ("options * prediction?"). 
If timestamp is the only option and it's * formatted as recommended in RFC 1323 appendix A, we * quickly get the values now and not bother calling * tcp_dooptions(), etc. */ if ((optlen == TCPOLEN_TSTAMP_APPA || (optlen > TCPOLEN_TSTAMP_APPA && optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) && (th->th_flags & TH_SYN) == 0) { opti.ts_present = 1; opti.ts_val = ntohl(*(u_int32_t *)(optp + 4)); opti.ts_ecr = ntohl(*(u_int32_t *)(optp + 8)); optp = NULL; /* we've parsed the options */ } } tiflags = th->th_flags; /* * Convert TCP protocol specific fields to host format. */ th->th_seq = ntohl(th->th_seq); th->th_ack = ntohl(th->th_ack); th->th_win = ntohs(th->th_win); th->th_urp = ntohs(th->th_urp); /* * Locate pcb for segment. */ #if NPF > 0 inp = pf_inp_lookup(m); #endif findpcb: if (inp == NULL) { switch (af) { #ifdef INET6 case AF_INET6: inp = in6_pcbhashlookup(&tcbtable, &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport, m->m_pkthdr.ph_rtableid); break; #endif case AF_INET: inp = in_pcbhashlookup(&tcbtable, ip->ip_src, th->th_sport, ip->ip_dst, th->th_dport, m->m_pkthdr.ph_rtableid); break; } } if (inp == NULL) { tcpstat_inc(tcps_pcbhashmiss); switch (af) { #ifdef INET6 case AF_INET6: inp = in6_pcblookup_listen(&tcbtable, &ip6->ip6_dst, th->th_dport, m, m->m_pkthdr.ph_rtableid); break; #endif /* INET6 */ case AF_INET: inp = in_pcblookup_listen(&tcbtable, ip->ip_dst, th->th_dport, m, m->m_pkthdr.ph_rtableid); break; } /* * If the state is CLOSED (i.e., TCB does not exist) then * all data in the incoming segment is discarded. * If the TCB exists but is in CLOSED state, it is embryonic, * but should either do a listen or a connect soon. */ if (inp == NULL) { tcpstat_inc(tcps_noport); goto dropwithreset_ratelim; } } KASSERT(sotoinpcb(inp->inp_socket) == inp); KASSERT(intotcpcb(inp) == NULL || intotcpcb(inp)->t_inpcb == inp); soassertlocked(inp->inp_socket); /* Check the minimum TTL for socket. 
*/ switch (af) { case AF_INET: if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) goto drop; break; #ifdef INET6 case AF_INET6: if (inp->inp_ip6_minhlim && inp->inp_ip6_minhlim > ip6->ip6_hlim) goto drop; break; #endif } tp = intotcpcb(inp); if (tp == NULL) goto dropwithreset_ratelim; if (tp->t_state == TCPS_CLOSED) goto drop; /* Unscale the window into a 32-bit value. */ if ((tiflags & TH_SYN) == 0) tiwin = th->th_win << tp->snd_scale; else tiwin = th->th_win; so = inp->inp_socket; if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) { union syn_cache_sa src; union syn_cache_sa dst; bzero(&src, sizeof(src)); bzero(&dst, sizeof(dst)); switch (af) { case AF_INET: src.sin.sin_len = sizeof(struct sockaddr_in); src.sin.sin_family = AF_INET; src.sin.sin_addr = ip->ip_src; src.sin.sin_port = th->th_sport; dst.sin.sin_len = sizeof(struct sockaddr_in); dst.sin.sin_family = AF_INET; dst.sin.sin_addr = ip->ip_dst; dst.sin.sin_port = th->th_dport; break; #ifdef INET6 case AF_INET6: src.sin6.sin6_len = sizeof(struct sockaddr_in6); src.sin6.sin6_family = AF_INET6; src.sin6.sin6_addr = ip6->ip6_src; src.sin6.sin6_port = th->th_sport; dst.sin6.sin6_len = sizeof(struct sockaddr_in6); dst.sin6.sin6_family = AF_INET6; dst.sin6.sin6_addr = ip6->ip6_dst; dst.sin6.sin6_port = th->th_dport; break; #endif /* INET6 */ } if (so->so_options & SO_DEBUG) { otp = tp; ostate = tp->t_state; switch (af) { #ifdef INET6 case AF_INET6: saveti = (caddr_t) &tcp_saveti6; memcpy(&tcp_saveti6.ti6_i, ip6, sizeof(*ip6)); memcpy(&tcp_saveti6.ti6_t, th, sizeof(*th)); break; #endif case AF_INET: saveti = (caddr_t) &tcp_saveti; memcpy(&tcp_saveti.ti_i, ip, sizeof(*ip)); memcpy(&tcp_saveti.ti_t, th, sizeof(*th)); break; } } if (so->so_options & SO_ACCEPTCONN) { switch (tiflags & (TH_RST|TH_SYN|TH_ACK)) { case TH_SYN|TH_ACK|TH_RST: case TH_SYN|TH_RST: case TH_ACK|TH_RST: case TH_RST: syn_cache_reset(&src.sa, &dst.sa, th, inp->inp_rtableid); goto drop; case TH_SYN|TH_ACK: /* * Received a SYN,ACK. 
This should * never happen while we are in * LISTEN. Send an RST. */ goto badsyn; case TH_ACK: so = syn_cache_get(&src.sa, &dst.sa, th, iphlen, tlen, so, m); if (so == NULL) { /* * We don't have a SYN for * this ACK; send an RST. */ goto badsyn; } else if (so == (struct socket *)(-1)) { /* * We were unable to create * the connection. If the * 3-way handshake was * completed, and RST has * been sent to the peer. * Since the mbuf might be * in use for the reply, * do not free it. */ m = *mp = NULL; goto drop; } else { /* * We have created a * full-blown connection. */ tp = NULL; inp = sotoinpcb(so); tp = intotcpcb(inp); if (tp == NULL) goto badsyn; /*XXX*/ } break; default: /* * None of RST, SYN or ACK was set. * This is an invalid packet for a * TCB in LISTEN state. Send a RST. */ goto badsyn; case TH_SYN: /* * Received a SYN. */ #ifdef INET6 /* * If deprecated address is forbidden, we do * not accept SYN to deprecated interface * address to prevent any new inbound * connection from getting established. * When we do not accept SYN, we send a TCP * RST, with deprecated source address (instead * of dropping it). We compromise it as it is * much better for peer to send a RST, and * RST will be the final packet for the * exchange. * * If we do not forbid deprecated addresses, we * accept the SYN packet. RFC2462 does not * suggest dropping SYN in this case. * If we decipher RFC2462 5.5.4, it says like * this: * 1. use of deprecated addr with existing * communication is okay - "SHOULD continue * to be used" * 2. use of it with new communication: * (2a) "SHOULD NOT be used if alternate * address with sufficient scope is * available" * (2b) nothing mentioned otherwise. * Here we fall into (2b) case as we have no * choice in our source address selection - we * must obey the peer. * * The wording in RFC2462 is confusing, and * there are multiple description text for * deprecated address handling - worse, they * are not exactly the same. 
I believe 5.5.4 * is the best one, so we follow 5.5.4. */ if (ip6 && !ip6_use_deprecated) { struct in6_ifaddr *ia6; struct ifnet *ifp = if_get(m->m_pkthdr.ph_ifidx); if (ifp && (ia6 = in6ifa_ifpwithaddr(ifp, &ip6->ip6_dst)) && (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { tp = NULL; if_put(ifp); goto dropwithreset; } if_put(ifp); } #endif /* * LISTEN socket received a SYN * from itself? This can't possibly * be valid; drop the packet. */ if (th->th_dport == th->th_sport) { switch (af) { #ifdef INET6 case AF_INET6: if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &ip6->ip6_dst)) { tcpstat_inc(tcps_badsyn); goto drop; } break; #endif /* INET6 */ case AF_INET: if (ip->ip_dst.s_addr == ip->ip_src.s_addr) { tcpstat_inc(tcps_badsyn); goto drop; } break; } } /* * SYN looks ok; create compressed TCP * state for it. */ if (so->so_qlen > so->so_qlimit || syn_cache_add(&src.sa, &dst.sa, th, iphlen, so, m, optp, optlen, &opti, reuse) == -1) { tcpstat_inc(tcps_dropsyn); goto drop; } return IPPROTO_DONE; } } } #ifdef DIAGNOSTIC /* * Should not happen now that all embryonic connections * are handled with compressed state. */ if (tp->t_state == TCPS_LISTEN) panic("tcp_input: TCPS_LISTEN"); #endif #if NPF > 0 pf_inp_link(m, inp); #endif #ifdef IPSEC /* Find most recent IPsec tag */ mtag = m_tag_find(m, PACKET_TAG_IPSEC_IN_DONE, NULL); if (mtag != NULL) { tdbi = (struct tdb_ident *)(mtag + 1); tdb = gettdb(tdbi->rdomain, tdbi->spi, &tdbi->dst, tdbi->proto); } else tdb = NULL; ipsp_spd_lookup(m, af, iphlen, &error, IPSP_DIRECTION_IN, tdb, inp, 0); if (error) { tcpstat_inc(tcps_rcvnosec); goto drop; } #endif /* IPSEC */ /* * Segment received on connection. * Reset idle time and keep-alive timer. */ tp->t_rcvtime = tcp_now; if (TCPS_HAVEESTABLISHED(tp->t_state)) TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle); if (tp->sack_enable) tcp_del_sackholes(tp, th); /* Delete stale SACK holes */ /* * Process options. 
*/ #ifdef TCP_SIGNATURE if (optp || (tp->t_flags & TF_SIGNATURE)) #else if (optp) #endif if (tcp_dooptions(tp, optp, optlen, th, m, iphlen, &opti, m->m_pkthdr.ph_rtableid)) goto drop; if (opti.ts_present && opti.ts_ecr) { int rtt_test; /* subtract out the tcp timestamp modulator */ opti.ts_ecr -= tp->ts_modulate; /* make sure ts_ecr is sensible */ rtt_test = tcp_now - opti.ts_ecr; if (rtt_test < 0 || rtt_test > TCP_RTT_MAX) opti.ts_ecr = 0; } #ifdef TCP_ECN /* if congestion experienced, set ECE bit in subsequent packets. */ if ((iptos & IPTOS_ECN_MASK) == IPTOS_ECN_CE) { tp->t_flags |= TF_RCVD_CE; tcpstat_inc(tcps_ecn_rcvce); } #endif /* * Header prediction: check for the two common cases * of a uni-directional data xfer. If the packet has * no control flags, is in-sequence, the window didn't * change and we're not retransmitting, it's a * candidate. If the length is zero and the ack moved * forward, we're the sender side of the xfer. Just * free the data acked & wake any higher level process * that was blocked waiting for space. If the length * is non-zero and the ack didn't move, we're the * receiver side. If we're getting packets in-order * (the reassembly queue is empty), add the data to * the socket buffer and note that we need a delayed ack. */ if (tp->t_state == TCPS_ESTABLISHED && #ifdef TCP_ECN (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ECE|TH_CWR|TH_ACK)) == TH_ACK && #else (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && #endif (!opti.ts_present || TSTMP_GEQ(opti.ts_val, tp->ts_recent)) && th->th_seq == tp->rcv_nxt && tiwin && tiwin == tp->snd_wnd && tp->snd_nxt == tp->snd_max) { /* * If last ACK falls within this segment's sequence numbers, * record the timestamp. * Fix from Braden, see Stevens p. 
870 */ if (opti.ts_present && SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { tp->ts_recent_age = tcp_now; tp->ts_recent = opti.ts_val; } if (tlen == 0) { if (SEQ_GT(th->th_ack, tp->snd_una) && SEQ_LEQ(th->th_ack, tp->snd_max) && tp->snd_cwnd >= tp->snd_wnd && tp->t_dupacks == 0) { /* * this is a pure ack for outstanding data. */ tcpstat_inc(tcps_predack); if (opti.ts_present && opti.ts_ecr) tcp_xmit_timer(tp, tcp_now - opti.ts_ecr); else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) tcp_xmit_timer(tp, tcp_now - tp->t_rtttime); acked = th->th_ack - tp->snd_una; tcpstat_pkt(tcps_rcvackpack, tcps_rcvackbyte, acked); ND6_HINT(tp); sbdrop(so, &so->so_snd, acked); /* * If we had a pending ICMP message that * refers to data that have just been * acknowledged, disregard the recorded ICMP * message. */ if ((tp->t_flags & TF_PMTUD_PEND) && SEQ_GT(th->th_ack, tp->t_pmtud_th_seq)) tp->t_flags &= ~TF_PMTUD_PEND; /* * Keep track of the largest chunk of data * acknowledged since last PMTU update */ if (tp->t_pmtud_mss_acked < acked) tp->t_pmtud_mss_acked = acked; tp->snd_una = th->th_ack; /* * We want snd_last to track snd_una so * as to avoid sequence wraparound problems * for very large transfers. */ #ifdef TCP_ECN if (SEQ_GT(tp->snd_una, tp->snd_last)) #endif tp->snd_last = tp->snd_una; m_freem(m); /* * If all outstanding data are acked, stop * retransmit timer, otherwise restart timer * using current (possibly backed-off) value. * If process is waiting for space, * wakeup/selwakeup/signal. If data * are ready to send, let tcp_output * decide between more output or persist. 
*/ if (tp->snd_una == tp->snd_max) TCP_TIMER_DISARM(tp, TCPT_REXMT); else if (TCP_TIMER_ISARMED(tp, TCPT_PERSIST) == 0) TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur); tcp_update_sndspace(tp); if (sb_notify(so, &so->so_snd)) { tp->t_flags |= TF_BLOCKOUTPUT; sowwakeup(so); tp->t_flags &= ~TF_BLOCKOUTPUT; } if (so->so_snd.sb_cc || tp->t_flags & TF_NEEDOUTPUT) (void) tcp_output(tp); return IPPROTO_DONE; } } else if (th->th_ack == tp->snd_una && TAILQ_EMPTY(&tp->t_segq) && tlen <= sbspace(so, &so->so_rcv)) { /* * This is a pure, in-sequence data packet * with nothing on the reassembly queue and * we have enough buffer space to take it. */ /* Clean receiver SACK report if present */ if (tp->sack_enable && tp->rcv_numsacks) tcp_clean_sackreport(tp); tcpstat_inc(tcps_preddat); tp->rcv_nxt += tlen; tcpstat_pkt(tcps_rcvpack, tcps_rcvbyte, tlen); ND6_HINT(tp); TCP_SETUP_ACK(tp, tiflags, m); /* * Drop TCP, IP headers and TCP options then add data * to socket buffer. */ if (so->so_state & SS_CANTRCVMORE) m_freem(m); else { if (opti.ts_present && opti.ts_ecr) { if (tp->rfbuf_ts < opti.ts_ecr && opti.ts_ecr - tp->rfbuf_ts < hz) { tcp_update_rcvspace(tp); /* Start over with next RTT. */ tp->rfbuf_cnt = 0; tp->rfbuf_ts = 0; } else tp->rfbuf_cnt += tlen; } m_adj(m, iphlen + off); sbappendstream(so, &so->so_rcv, m); } tp->t_flags |= TF_BLOCKOUTPUT; sorwakeup(so); tp->t_flags &= ~TF_BLOCKOUTPUT; if (tp->t_flags & (TF_ACKNOW|TF_NEEDOUTPUT)) (void) tcp_output(tp); return IPPROTO_DONE; } } /* * Compute mbuf offset to TCP data segment. */ hdroptlen = iphlen + off; /* * Calculate amount of space in receive window, * and then do TCP input processing. * Receive window is amount of space in rcv queue, * but not less than advertised window. */ { int win; win = sbspace(so, &so->so_rcv); if (win < 0) win = 0; tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); } /* Reset receive buffer auto scaling when not in bulk receive mode. 
*/ tp->rfbuf_cnt = 0; tp->rfbuf_ts = 0; switch (tp->t_state) { /* * If the state is SYN_RECEIVED: * if seg contains SYN/ACK, send an RST. * if seg contains an ACK, but not for our SYN/ACK, send an RST */ case TCPS_SYN_RECEIVED: if (tiflags & TH_ACK) { if (tiflags & TH_SYN) { tcpstat_inc(tcps_badsyn); goto dropwithreset; } if (SEQ_LEQ(th->th_ack, tp->snd_una) || SEQ_GT(th->th_ack, tp->snd_max)) goto dropwithreset; } break; /* * If the state is SYN_SENT: * if seg contains an ACK, but not for our SYN, drop the input. * if seg contains a RST, then drop the connection. * if seg does not contain SYN, then drop it. * Otherwise this is an acceptable SYN segment * initialize tp->rcv_nxt and tp->irs * if seg contains ack then advance tp->snd_una * if SYN has been acked change to ESTABLISHED else SYN_RCVD state * arrange for segment to be acked (eventually) * continue processing rest of data/controls, beginning with URG */ case TCPS_SYN_SENT: if ((tiflags & TH_ACK) && (SEQ_LEQ(th->th_ack, tp->iss) || SEQ_GT(th->th_ack, tp->snd_max))) goto dropwithreset; if (tiflags & TH_RST) { #ifdef TCP_ECN /* if ECN is enabled, fall back to non-ecn at rexmit */ if (tcp_do_ecn && !(tp->t_flags & TF_DISABLE_ECN)) goto drop; #endif if (tiflags & TH_ACK) tp = tcp_drop(tp, ECONNREFUSED); goto drop; } if ((tiflags & TH_SYN) == 0) goto drop; if (tiflags & TH_ACK) { tp->snd_una = th->th_ack; if (SEQ_LT(tp->snd_nxt, tp->snd_una)) tp->snd_nxt = tp->snd_una; } TCP_TIMER_DISARM(tp, TCPT_REXMT); tp->irs = th->th_seq; tcp_mss(tp, opti.maxseg); /* Reset initial window to 1 segment for retransmit */ if (tp->t_rxtshift > 0) tp->snd_cwnd = tp->t_maxseg; tcp_rcvseqinit(tp); tp->t_flags |= TF_ACKNOW; /* * If we've sent a SACK_PERMITTED option, and the peer * also replied with one, then TF_SACK_PERMIT should have * been set in tcp_dooptions(). If it was not, disable SACKs. 
*/ if (tp->sack_enable) tp->sack_enable = tp->t_flags & TF_SACK_PERMIT; #ifdef TCP_ECN /* * if ECE is set but CWR is not set for SYN-ACK, or * both ECE and CWR are set for simultaneous open, * peer is ECN capable. */ if (tcp_do_ecn) { switch (tiflags & (TH_ACK|TH_ECE|TH_CWR)) { case TH_ACK|TH_ECE: case TH_ECE|TH_CWR: tp->t_flags |= TF_ECN_PERMIT; tiflags &= ~(TH_ECE|TH_CWR); tcpstat_inc(tcps_ecn_accepts); } } #endif if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) { tcpstat_inc(tcps_connects); tp->t_flags |= TF_BLOCKOUTPUT; soisconnected(so); tp->t_flags &= ~TF_BLOCKOUTPUT; tp->t_state = TCPS_ESTABLISHED; TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle); /* Do window scaling on this connection? */ if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == (TF_RCVD_SCALE|TF_REQ_SCALE)) { tp->snd_scale = tp->requested_s_scale; tp->rcv_scale = tp->request_r_scale; } tcp_flush_queue(tp); /* * if we didn't have to retransmit the SYN, * use its rtt as our initial srtt & rtt var. */ if (tp->t_rtttime) tcp_xmit_timer(tp, tcp_now - tp->t_rtttime); /* * Since new data was acked (the SYN), open the * congestion window by one MSS. We do this * here, because we won't go through the normal * ACK processing below. And since this is the * start of the connection, we know we are in * the exponential phase of slow-start. */ tp->snd_cwnd += tp->t_maxseg; } else tp->t_state = TCPS_SYN_RECEIVED; #if 0 trimthenstep6: #endif /* * Advance th->th_seq to correspond to first data byte. * If data, trim to stay within window, * dropping FIN if necessary. */ th->th_seq++; if (tlen > tp->rcv_wnd) { todrop = tlen - tp->rcv_wnd; m_adj(m, -todrop); tlen = tp->rcv_wnd; tiflags &= ~TH_FIN; tcpstat_pkt(tcps_rcvpackafterwin, tcps_rcvbyteafterwin, todrop); } tp->snd_wl1 = th->th_seq - 1; tp->rcv_up = th->th_seq; goto step6; /* * If a new connection request is received while in TIME_WAIT, * drop the old connection and start over if the if the * timestamp or the sequence numbers are above the previous * ones. 
*/ case TCPS_TIME_WAIT: if (((tiflags & (TH_SYN|TH_ACK)) == TH_SYN) && ((opti.ts_present && TSTMP_LT(tp->ts_recent, opti.ts_val)) || SEQ_GT(th->th_seq, tp->rcv_nxt))) { #if NPF > 0 /* * The socket will be recreated but the new state * has already been linked to the socket. Remove the * link between old socket and new state. */ pf_inp_unlink(inp); #endif /* * Advance the iss by at least 32768, but * clear the msb in order to make sure * that SEG_LT(snd_nxt, iss). */ iss = tp->snd_nxt + ((arc4random() & 0x7fffffff) | 0x8000); reuse = &iss; tp = tcp_close(tp); inp = NULL; goto findpcb; } } /* * States other than LISTEN or SYN_SENT. * First check timestamp, if present. * Then check that at least some bytes of segment are within * receive window. If segment begins before rcv_nxt, * drop leading data (and SYN); if nothing left, just ack. * * RFC 1323 PAWS: If we have a timestamp reply on this segment * and it's less than opti.ts_recent, drop it. */ if (opti.ts_present && (tiflags & TH_RST) == 0 && tp->ts_recent && TSTMP_LT(opti.ts_val, tp->ts_recent)) { /* Check to see if ts_recent is over 24 days old. */ if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) { /* * Invalidate ts_recent. If this segment updates * ts_recent, the age will be reset later and ts_recent * will get a valid value. If it does not, setting * ts_recent to zero will at least satisfy the * requirement that zero be placed in the timestamp * echo reply when ts_recent isn't valid. The * age isn't reset until we get a valid ts_recent * because we don't want out-of-order segments to be * dropped when ts_recent is old. 
*/ tp->ts_recent = 0; } else { tcpstat_pkt(tcps_rcvduppack, tcps_rcvdupbyte, tlen); tcpstat_inc(tcps_pawsdrop); if (tlen) goto dropafterack; goto drop; } } todrop = tp->rcv_nxt - th->th_seq; if (todrop > 0) { if (tiflags & TH_SYN) { tiflags &= ~TH_SYN; th->th_seq++; if (th->th_urp > 1) th->th_urp--; else tiflags &= ~TH_URG; todrop--; } if (todrop > tlen || (todrop == tlen && (tiflags & TH_FIN) == 0)) { /* * Any valid FIN must be to the left of the * window. At this point, FIN must be a * duplicate or out-of-sequence, so drop it. */ tiflags &= ~TH_FIN; /* * Send ACK to resynchronize, and drop any data, * but keep on processing for RST or ACK. */ tp->t_flags |= TF_ACKNOW; todrop = tlen; tcpstat_pkt(tcps_rcvduppack, tcps_rcvdupbyte, todrop); } else { tcpstat_pkt(tcps_rcvpartduppack, tcps_rcvpartdupbyte, todrop); } hdroptlen += todrop; /* drop from head afterwards */ th->th_seq += todrop; tlen -= todrop; if (th->th_urp > todrop) th->th_urp -= todrop; else { tiflags &= ~TH_URG; th->th_urp = 0; } } /* * If new data are received on a connection after the * user processes are gone, then RST the other end. */ if ((so->so_state & SS_NOFDREF) && tp->t_state > TCPS_CLOSE_WAIT && tlen) { tp = tcp_close(tp); tcpstat_inc(tcps_rcvafterclose); goto dropwithreset; } /* * If segment ends after window, drop trailing data * (and PUSH and FIN); if nothing left, just ACK. */ todrop = (th->th_seq + tlen) - (tp->rcv_nxt+tp->rcv_wnd); if (todrop > 0) { tcpstat_inc(tcps_rcvpackafterwin); if (todrop >= tlen) { tcpstat_add(tcps_rcvbyteafterwin, tlen); /* * If window is closed can only take segments at * window edge, and have to drop data and PUSH from * incoming segments. Continue processing, but * remember to ack. Otherwise, drop segment * and ack. 
*/ if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { tp->t_flags |= TF_ACKNOW; tcpstat_inc(tcps_rcvwinprobe); } else goto dropafterack; } else tcpstat_add(tcps_rcvbyteafterwin, todrop); m_adj(m, -todrop); tlen -= todrop; tiflags &= ~(TH_PUSH|TH_FIN); } /* * If last ACK falls within this segment's sequence numbers, * record its timestamp if it's more recent. * NOTE that the test is modified according to the latest * proposal of the tcplw@cray.com list (Braden 1993/04/26). */ if (opti.ts_present && TSTMP_GEQ(opti.ts_val, tp->ts_recent) && SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { tp->ts_recent_age = tcp_now; tp->ts_recent = opti.ts_val; } /* * If the RST bit is set examine the state: * SYN_RECEIVED STATE: * If passive open, return to LISTEN state. * If active open, inform user that connection was refused. * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES: * Inform user that connection was reset, and close tcb. * CLOSING, LAST_ACK, TIME_WAIT STATES * Close the tcb. */ if (tiflags & TH_RST) { if (th->th_seq != tp->last_ack_sent && th->th_seq != tp->rcv_nxt && th->th_seq != (tp->rcv_nxt + 1)) goto drop; switch (tp->t_state) { case TCPS_SYN_RECEIVED: #ifdef TCP_ECN /* if ECN is enabled, fall back to non-ecn at rexmit */ if (tcp_do_ecn && !(tp->t_flags & TF_DISABLE_ECN)) goto drop; #endif so->so_error = ECONNREFUSED; goto close; case TCPS_ESTABLISHED: case TCPS_FIN_WAIT_1: case TCPS_FIN_WAIT_2: case TCPS_CLOSE_WAIT: so->so_error = ECONNRESET; close: tp->t_state = TCPS_CLOSED; tcpstat_inc(tcps_drops); tp = tcp_close(tp); goto drop; case TCPS_CLOSING: case TCPS_LAST_ACK: case TCPS_TIME_WAIT: tp = tcp_close(tp); goto drop; } } /* * If a SYN is in the window, then this is an * error and we ACK and drop the packet. */ if (tiflags & TH_SYN) goto dropafterack_ratelim; /* * If the ACK bit is off we drop the segment and return. */ if ((tiflags & TH_ACK) == 0) { if (tp->t_flags & TF_ACKNOW) goto dropafterack; else goto drop; } /* * Ack processing. 
*/ switch (tp->t_state) { /* * In SYN_RECEIVED state, the ack ACKs our SYN, so enter * ESTABLISHED state and continue processing. * The ACK was checked above. */ case TCPS_SYN_RECEIVED: tcpstat_inc(tcps_connects); tp->t_flags |= TF_BLOCKOUTPUT; soisconnected(so); tp->t_flags &= ~TF_BLOCKOUTPUT; tp->t_state = TCPS_ESTABLISHED; TCP_TIMER_ARM(tp, TCPT_KEEP, tcp_keepidle); /* Do window scaling? */ if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == (TF_RCVD_SCALE|TF_REQ_SCALE)) { tp->snd_scale = tp->requested_s_scale; tp->rcv_scale = tp->request_r_scale; tiwin = th->th_win << tp->snd_scale; } tcp_flush_queue(tp); tp->snd_wl1 = th->th_seq - 1; /* fall into ... */ /* * In ESTABLISHED state: drop duplicate ACKs; ACK out of range * ACKs. If the ack is in the range * tp->snd_una < th->th_ack <= tp->snd_max * then advance tp->snd_una to th->th_ack and drop * data from the retransmission queue. If this ACK reflects * more up to date window information we update our window information. */ case TCPS_ESTABLISHED: case TCPS_FIN_WAIT_1: case TCPS_FIN_WAIT_2: case TCPS_CLOSE_WAIT: case TCPS_CLOSING: case TCPS_LAST_ACK: case TCPS_TIME_WAIT: #ifdef TCP_ECN /* * if we receive ECE and are not already in recovery phase, * reduce cwnd by half but don't slow-start. * advance snd_last to snd_max not to reduce cwnd again * until all outstanding packets are acked. */ if (tcp_do_ecn && (tiflags & TH_ECE)) { if ((tp->t_flags & TF_ECN_PERMIT) && SEQ_GEQ(tp->snd_una, tp->snd_last)) { u_int win; win = min(tp->snd_wnd, tp->snd_cwnd) / tp->t_maxseg; if (win > 1) { tp->snd_ssthresh = win / 2 * tp->t_maxseg; tp->snd_cwnd = tp->snd_ssthresh; tp->snd_last = tp->snd_max; tp->t_flags |= TF_SEND_CWR; tcpstat_inc(tcps_cwr_ecn); } } tcpstat_inc(tcps_ecn_rcvece); } /* * if we receive CWR, we know that the peer has reduced * its congestion window. stop sending ecn-echo. 
*/ if ((tiflags & TH_CWR)) { tp->t_flags &= ~TF_RCVD_CE; tcpstat_inc(tcps_ecn_rcvcwr); } #endif /* TCP_ECN */ if (SEQ_LEQ(th->th_ack, tp->snd_una)) { /* * Duplicate/old ACK processing. * Increments t_dupacks: * Pure duplicate (same seq/ack/window, no data) * Doesn't affect t_dupacks: * Data packets. * Normal window updates (window opens) * Resets t_dupacks: * New data ACKed. * Window shrinks * Old ACK */ if (tlen) { /* Drop very old ACKs unless th_seq matches */ if (th->th_seq != tp->rcv_nxt && SEQ_LT(th->th_ack, tp->snd_una - tp->max_sndwnd)) { tcpstat_inc(tcps_rcvacktooold); goto drop; } break; } /* * If we get an old ACK, there is probably packet * reordering going on. Be conservative and reset * t_dupacks so that we are less aggressive in * doing a fast retransmit. */ if (th->th_ack != tp->snd_una) { tp->t_dupacks = 0; break; } if (tiwin == tp->snd_wnd) { tcpstat_inc(tcps_rcvdupack); /* * If we have outstanding data (other than * a window probe), this is a completely * duplicate ack (ie, window info didn't * change), the ack is the biggest we've * seen and we've seen exactly our rexmt * threshold of them, assume a packet * has been dropped and retransmit it. * Kludge snd_nxt & the congestion * window so we send only this one * packet. * * We know we're losing at the current * window size so do congestion avoidance * (set ssthresh to half the current window * and pull our congestion window back to * the new ssthresh). * * Dup acks mean that packets have left the * network (they're now cached at the receiver) * so bump cwnd by the amount in the receiver * to keep a constant cwnd packets in the * network. */ if (TCP_TIMER_ISARMED(tp, TCPT_REXMT) == 0) tp->t_dupacks = 0; else if (++tp->t_dupacks == tcprexmtthresh) { tcp_seq onxt = tp->snd_nxt; u_long win = ulmin(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg; if (SEQ_LT(th->th_ack, tp->snd_last)){ /* * False fast retx after * timeout. Do not cut window. 
*/ tp->t_dupacks = 0; goto drop; } if (win < 2) win = 2; tp->snd_ssthresh = win * tp->t_maxseg; tp->snd_last = tp->snd_max; if (tp->sack_enable) { TCP_TIMER_DISARM(tp, TCPT_REXMT); tp->t_rtttime = 0; #ifdef TCP_ECN tp->t_flags |= TF_SEND_CWR; #endif tcpstat_inc(tcps_cwr_frecovery); tcpstat_inc(tcps_sack_recovery_episode); /* * tcp_output() will send * oldest SACK-eligible rtx. */ (void) tcp_output(tp); tp->snd_cwnd = tp->snd_ssthresh+ tp->t_maxseg * tp->t_dupacks; goto drop; } TCP_TIMER_DISARM(tp, TCPT_REXMT); tp->t_rtttime = 0; tp->snd_nxt = th->th_ack; tp->snd_cwnd = tp->t_maxseg; #ifdef TCP_ECN tp->t_flags |= TF_SEND_CWR; #endif tcpstat_inc(tcps_cwr_frecovery); tcpstat_inc(tcps_sndrexmitfast); (void) tcp_output(tp); tp->snd_cwnd = tp->snd_ssthresh + tp->t_maxseg * tp->t_dupacks; if (SEQ_GT(onxt, tp->snd_nxt)) tp->snd_nxt = onxt; goto drop; } else if (tp->t_dupacks > tcprexmtthresh) { tp->snd_cwnd += tp->t_maxseg; (void) tcp_output(tp); goto drop; } } else if (tiwin < tp->snd_wnd) { /* * The window was retracted! Previous dup * ACKs may have been due to packets arriving * after the shrunken window, not a missing * packet, so play it safe and reset t_dupacks */ tp->t_dupacks = 0; } break; } /* * If the congestion window was inflated to account * for the other side's cached packets, retract it. */ if (tp->t_dupacks >= tcprexmtthresh) { /* Check for a partial ACK */ if (SEQ_LT(th->th_ack, tp->snd_last)) { if (tp->sack_enable) tcp_sack_partialack(tp, th); else tcp_newreno_partialack(tp, th); } else { /* Out of fast recovery */ tp->snd_cwnd = tp->snd_ssthresh; if (tcp_seq_subtract(tp->snd_max, th->th_ack) < tp->snd_ssthresh) tp->snd_cwnd = tcp_seq_subtract(tp->snd_max, th->th_ack); tp->t_dupacks = 0; } } else { /* * Reset the duplicate ACK counter if we * were not in fast recovery. 
*/ tp->t_dupacks = 0; } if (SEQ_GT(th->th_ack, tp->snd_max)) { tcpstat_inc(tcps_rcvacktoomuch); goto dropafterack_ratelim; } acked = th->th_ack - tp->snd_una; tcpstat_pkt(tcps_rcvackpack, tcps_rcvackbyte, acked); /* * If we have a timestamp reply, update smoothed * round trip time. If no timestamp is present but * transmit timer is running and timed sequence * number was acked, update smoothed round trip time. * Since we now have an rtt measurement, cancel the * timer backoff (cf., Phil Karn's retransmit alg.). * Recompute the initial retransmit timer. */ if (opti.ts_present && opti.ts_ecr) tcp_xmit_timer(tp, tcp_now - opti.ts_ecr); else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) tcp_xmit_timer(tp, tcp_now - tp->t_rtttime); /* * If all outstanding data is acked, stop retransmit * timer and remember to restart (more output or persist). * If there is more data to be acked, restart retransmit * timer, using current (possibly backed-off) value. */ if (th->th_ack == tp->snd_max) { TCP_TIMER_DISARM(tp, TCPT_REXMT); tp->t_flags |= TF_NEEDOUTPUT; } else if (TCP_TIMER_ISARMED(tp, TCPT_PERSIST) == 0) TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur); /* * When new data is acked, open the congestion window. * If the window gives us less than ssthresh packets * in flight, open exponentially (maxseg per packet). * Otherwise open linearly: maxseg per window * (maxseg^2 / cwnd per packet). 
*/ { u_int cw = tp->snd_cwnd; u_int incr = tp->t_maxseg; if (cw > tp->snd_ssthresh) incr = incr * incr / cw; if (tp->t_dupacks < tcprexmtthresh) tp->snd_cwnd = ulmin(cw + incr, TCP_MAXWIN << tp->snd_scale); } ND6_HINT(tp); if (acked > so->so_snd.sb_cc) { tp->snd_wnd -= so->so_snd.sb_cc; sbdrop(so, &so->so_snd, (int)so->so_snd.sb_cc); ourfinisacked = 1; } else { sbdrop(so, &so->so_snd, acked); tp->snd_wnd -= acked; ourfinisacked = 0; } tcp_update_sndspace(tp); if (sb_notify(so, &so->so_snd)) { tp->t_flags |= TF_BLOCKOUTPUT; sowwakeup(so); tp->t_flags &= ~TF_BLOCKOUTPUT; } /* * If we had a pending ICMP message that referred to data * that have just been acknowledged, disregard the recorded * ICMP message. */ if ((tp->t_flags & TF_PMTUD_PEND) && SEQ_GT(th->th_ack, tp->t_pmtud_th_seq)) tp->t_flags &= ~TF_PMTUD_PEND; /* * Keep track of the largest chunk of data acknowledged * since last PMTU update */ if (tp->t_pmtud_mss_acked < acked) tp->t_pmtud_mss_acked = acked; tp->snd_una = th->th_ack; #ifdef TCP_ECN /* sync snd_last with snd_una */ if (SEQ_GT(tp->snd_una, tp->snd_last)) tp->snd_last = tp->snd_una; #endif if (SEQ_LT(tp->snd_nxt, tp->snd_una)) tp->snd_nxt = tp->snd_una; switch (tp->t_state) { /* * In FIN_WAIT_1 STATE in addition to the processing * for the ESTABLISHED state if our FIN is now acknowledged * then enter FIN_WAIT_2. */ case TCPS_FIN_WAIT_1: if (ourfinisacked) { /* * If we can't receive any more * data, then closing user can proceed. * Starting the timer is contrary to the * specification, but if we don't get a FIN * we'll hang forever. */ if (so->so_state & SS_CANTRCVMORE) { tp->t_flags |= TF_BLOCKOUTPUT; soisdisconnected(so); tp->t_flags &= ~TF_BLOCKOUTPUT; TCP_TIMER_ARM(tp, TCPT_2MSL, tcp_maxidle); } tp->t_state = TCPS_FIN_WAIT_2; } break; /* * In CLOSING STATE in addition to the processing for * the ESTABLISHED state if the ACK acknowledges our FIN * then enter the TIME-WAIT state, otherwise ignore * the segment. 
*/ case TCPS_CLOSING: if (ourfinisacked) { tp->t_state = TCPS_TIME_WAIT; tcp_canceltimers(tp); TCP_TIMER_ARM(tp, TCPT_2MSL, 2 * TCPTV_MSL); tp->t_flags |= TF_BLOCKOUTPUT; soisdisconnected(so); tp->t_flags &= ~TF_BLOCKOUTPUT; } break; /* * In LAST_ACK, we may still be waiting for data to drain * and/or to be acked, as well as for the ack of our FIN. * If our FIN is now acknowledged, delete the TCB, * enter the closed state and return. */ case TCPS_LAST_ACK: if (ourfinisacked) { tp = tcp_close(tp); goto drop; } break; /* * In TIME_WAIT state the only thing that should arrive * is a retransmission of the remote FIN. Acknowledge * it and restart the finack timer. */ case TCPS_TIME_WAIT: TCP_TIMER_ARM(tp, TCPT_2MSL, 2 * TCPTV_MSL); goto dropafterack; } } step6: /* * Update window information. * Don't look at window if no ACK: TAC's send garbage on first SYN. */ if ((tiflags & TH_ACK) && (SEQ_LT(tp->snd_wl1, th->th_seq) || (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { /* keep track of pure window updates */ if (tlen == 0 && tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) tcpstat_inc(tcps_rcvwinupd); tp->snd_wnd = tiwin; tp->snd_wl1 = th->th_seq; tp->snd_wl2 = th->th_ack; if (tp->snd_wnd > tp->max_sndwnd) tp->max_sndwnd = tp->snd_wnd; tp->t_flags |= TF_NEEDOUTPUT; } /* * Process segments with URG. */ if ((tiflags & TH_URG) && th->th_urp && TCPS_HAVERCVDFIN(tp->t_state) == 0) { /* * This is a kludge, but if we receive and accept * random urgent pointers, we'll crash in * soreceive. It's hard to imagine someone * actually wanting to send this much urgent data. */ if (th->th_urp + so->so_rcv.sb_cc > sb_max) { th->th_urp = 0; /* XXX */ tiflags &= ~TH_URG; /* XXX */ goto dodata; /* XXX */ } /* * If this segment advances the known urgent pointer, * then mark the data stream. 
This should not happen * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since * a FIN has been received from the remote side. * In these states we ignore the URG. * * According to RFC961 (Assigned Protocols), * the urgent pointer points to the last octet * of urgent data. We continue, however, * to consider it to indicate the first octet * of data past the urgent section as the original * spec states (in one of two places). */ if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { tp->rcv_up = th->th_seq + th->th_urp; so->so_oobmark = so->so_rcv.sb_cc + (tp->rcv_up - tp->rcv_nxt) - 1; if (so->so_oobmark == 0) so->so_state |= SS_RCVATMARK; sohasoutofband(so); tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); } /* * Remove out of band data so doesn't get presented to user. * This can happen independent of advancing the URG pointer, * but if two URG's are pending at once, some out-of-band * data may creep in... ick. */ if (th->th_urp <= (u_int16_t) tlen && (so->so_options & SO_OOBINLINE) == 0) tcp_pulloutofband(so, th->th_urp, m, hdroptlen); } else /* * If no out of band data is expected, * pull receive urgent pointer along * with the receive window. */ if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) tp->rcv_up = tp->rcv_nxt; dodata: /* XXX */ /* * Process the segment text, merging it into the TCP sequencing queue, * and arranging for acknowledgment of receipt if necessary. * This process logically involves adjusting tp->rcv_wnd as data * is presented to the user (this happens in tcp_usrreq.c, * case PRU_RCVD). If a FIN has already been received on this * connection then we just ignore the text. 
*/ if ((tlen || (tiflags & TH_FIN)) && TCPS_HAVERCVDFIN(tp->t_state) == 0) { tcp_seq laststart = th->th_seq; tcp_seq lastend = th->th_seq + tlen; if (th->th_seq == tp->rcv_nxt && TAILQ_EMPTY(&tp->t_segq) && tp->t_state == TCPS_ESTABLISHED) { TCP_SETUP_ACK(tp, tiflags, m); tp->rcv_nxt += tlen; tiflags = th->th_flags & TH_FIN; tcpstat_pkt(tcps_rcvpack, tcps_rcvbyte, tlen); ND6_HINT(tp); if (so->so_state & SS_CANTRCVMORE) m_freem(m); else { m_adj(m, hdroptlen); sbappendstream(so, &so->so_rcv, m); } tp->t_flags |= TF_BLOCKOUTPUT; sorwakeup(so); tp->t_flags &= ~TF_BLOCKOUTPUT; } else { m_adj(m, hdroptlen); tiflags = tcp_reass(tp, th, m, &tlen); tp->t_flags |= TF_ACKNOW; } if (tp->sack_enable) tcp_update_sack_list(tp, laststart, lastend); /* * variable len never referenced again in modern BSD, * so why bother computing it ?? */ #if 0 /* * Note the amount of data that peer has sent into * our window, in order to estimate the sender's * buffer size. */ len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); #endif /* 0 */ } else { m_freem(m); tiflags &= ~TH_FIN; } /* * If FIN is received ACK the FIN and let the user know * that the connection is closing. Ignore a FIN received before * the connection is fully established. */ if ((tiflags & TH_FIN) && TCPS_HAVEESTABLISHED(tp->t_state)) { if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { tp->t_flags |= TF_BLOCKOUTPUT; socantrcvmore(so); tp->t_flags &= ~TF_BLOCKOUTPUT; tp->t_flags |= TF_ACKNOW; tp->rcv_nxt++; } switch (tp->t_state) { /* * In ESTABLISHED STATE enter the CLOSE_WAIT state. */ case TCPS_ESTABLISHED: tp->t_state = TCPS_CLOSE_WAIT; break; /* * If still in FIN_WAIT_1 STATE FIN has not been acked so * enter the CLOSING state. */ case TCPS_FIN_WAIT_1: tp->t_state = TCPS_CLOSING; break; /* * In FIN_WAIT_2 state enter the TIME_WAIT state, * starting the time-wait timer, turning off the other * standard timers. 
*/ case TCPS_FIN_WAIT_2: tp->t_state = TCPS_TIME_WAIT; tcp_canceltimers(tp); TCP_TIMER_ARM(tp, TCPT_2MSL, 2 * TCPTV_MSL); tp->t_flags |= TF_BLOCKOUTPUT; soisdisconnected(so); tp->t_flags &= ~TF_BLOCKOUTPUT; break; /* * In TIME_WAIT state restart the 2 MSL time_wait timer. */ case TCPS_TIME_WAIT: TCP_TIMER_ARM(tp, TCPT_2MSL, 2 * TCPTV_MSL); break; } } if (otp) tcp_trace(TA_INPUT, ostate, tp, otp, saveti, 0, tlen); /* * Return any desired output. */ if (tp->t_flags & (TF_ACKNOW|TF_NEEDOUTPUT)) (void) tcp_output(tp); return IPPROTO_DONE; badsyn: /* * Received a bad SYN. Increment counters and dropwithreset. */ tcpstat_inc(tcps_badsyn); tp = NULL; goto dropwithreset; dropafterack_ratelim: if (ppsratecheck(&tcp_ackdrop_ppslim_last, &tcp_ackdrop_ppslim_count, tcp_ackdrop_ppslim) == 0) { /* XXX stat */ goto drop; } /* ...fall into dropafterack... */ dropafterack: /* * Generate an ACK dropping incoming segment if it occupies * sequence space, where the ACK reflects our state. */ if (tiflags & TH_RST) goto drop; m_freem(m); tp->t_flags |= TF_ACKNOW; (void) tcp_output(tp); return IPPROTO_DONE; dropwithreset_ratelim: /* * We may want to rate-limit RSTs in certain situations, * particularly if we are sending an RST in response to * an attempt to connect to or otherwise communicate with * a port for which we have no socket. */ if (ppsratecheck(&tcp_rst_ppslim_last, &tcp_rst_ppslim_count, tcp_rst_ppslim) == 0) { /* XXX stat */ goto drop; } /* ...fall into dropwithreset... */ dropwithreset: /* * Generate a RST, dropping incoming segment. * Make ACK acceptable to originator of segment. * Don't bother to respond to RST. 
*/ if (tiflags & TH_RST) goto drop; if (tiflags & TH_ACK) { tcp_respond(tp, mtod(m, caddr_t), th, (tcp_seq)0, th->th_ack, TH_RST, m->m_pkthdr.ph_rtableid); } else { if (tiflags & TH_SYN) tlen++; tcp_respond(tp, mtod(m, caddr_t), th, th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK, m->m_pkthdr.ph_rtableid); } m_freem(m); return IPPROTO_DONE; drop: /* * Drop space held by incoming segment and return. */ if (otp) tcp_trace(TA_DROP, ostate, tp, otp, saveti, 0, tlen); m_freem(m); return IPPROTO_DONE; } int tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcphdr *th, struct mbuf *m, int iphlen, struct tcp_opt_info *oi, u_int rtableid) { u_int16_t mss = 0; int opt, optlen; #ifdef TCP_SIGNATURE caddr_t sigp = NULL; struct tdb *tdb = NULL; #endif /* TCP_SIGNATURE */ for (; cp && cnt > 0; cnt -= optlen, cp += optlen) { opt = cp[0]; if (opt == TCPOPT_EOL) break; if (opt == TCPOPT_NOP) optlen = 1; else { if (cnt < 2) break; optlen = cp[1]; if (optlen < 2 || optlen > cnt) break; } switch (opt) { default: continue; case TCPOPT_MAXSEG: if (optlen != TCPOLEN_MAXSEG) continue; if (!(th->th_flags & TH_SYN)) continue; if (TCPS_HAVERCVDSYN(tp->t_state)) continue; memcpy(&mss, cp + 2, sizeof(mss)); mss = ntohs(mss); oi->maxseg = mss; break; case TCPOPT_WINDOW: if (optlen != TCPOLEN_WINDOW) continue; if (!(th->th_flags & TH_SYN)) continue; if (TCPS_HAVERCVDSYN(tp->t_state)) continue; tp->t_flags |= TF_RCVD_SCALE; tp->requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); break; case TCPOPT_TIMESTAMP: if (optlen != TCPOLEN_TIMESTAMP) continue; oi->ts_present = 1; memcpy(&oi->ts_val, cp + 2, sizeof(oi->ts_val)); oi->ts_val = ntohl(oi->ts_val); memcpy(&oi->ts_ecr, cp + 6, sizeof(oi->ts_ecr)); oi->ts_ecr = ntohl(oi->ts_ecr); if (!(th->th_flags & TH_SYN)) continue; if (TCPS_HAVERCVDSYN(tp->t_state)) continue; /* * A timestamp received in a SYN makes * it ok to send timestamp requests and replies. 
*/ tp->t_flags |= TF_RCVD_TSTMP; tp->ts_recent = oi->ts_val; tp->ts_recent_age = tcp_now; break; case TCPOPT_SACK_PERMITTED: if (!tp->sack_enable || optlen!=TCPOLEN_SACK_PERMITTED) continue; if (!(th->th_flags & TH_SYN)) continue; if (TCPS_HAVERCVDSYN(tp->t_state)) continue; /* MUST only be set on SYN */ tp->t_flags |= TF_SACK_PERMIT; break; case TCPOPT_SACK: tcp_sack_option(tp, th, cp, optlen); break; #ifdef TCP_SIGNATURE case TCPOPT_SIGNATURE: if (optlen != TCPOLEN_SIGNATURE) continue; if (sigp && timingsafe_bcmp(sigp, cp + 2, 16)) return (-1); sigp = cp + 2; break; #endif /* TCP_SIGNATURE */ } } #ifdef TCP_SIGNATURE if (tp->t_flags & TF_SIGNATURE) { union sockaddr_union src, dst; memset(&src, 0, sizeof(union sockaddr_union)); memset(&dst, 0, sizeof(union sockaddr_union)); switch (tp->pf) { case 0: case AF_INET: src.sa.sa_len = sizeof(struct sockaddr_in); src.sa.sa_family = AF_INET; src.sin.sin_addr = mtod(m, struct ip *)->ip_src; dst.sa.sa_len = sizeof(struct sockaddr_in); dst.sa.sa_family = AF_INET; dst.sin.sin_addr = mtod(m, struct ip *)->ip_dst; break; #ifdef INET6 case AF_INET6: src.sa.sa_len = sizeof(struct sockaddr_in6); src.sa.sa_family = AF_INET6; src.sin6.sin6_addr = mtod(m, struct ip6_hdr *)->ip6_src; dst.sa.sa_len = sizeof(struct sockaddr_in6); dst.sa.sa_family = AF_INET6; dst.sin6.sin6_addr = mtod(m, struct ip6_hdr *)->ip6_dst; break; #endif /* INET6 */ } tdb = gettdbbysrcdst(rtable_l2(rtableid), 0, &src, &dst, IPPROTO_TCP); /* * We don't have an SA for this peer, so we turn off * TF_SIGNATURE on the listen socket */ if (tdb == NULL && tp->t_state == TCPS_LISTEN) tp->t_flags &= ~TF_SIGNATURE; } if ((sigp ? 
TF_SIGNATURE : 0) ^ (tp->t_flags & TF_SIGNATURE)) { tcpstat_inc(tcps_rcvbadsig); return (-1); } if (sigp) { char sig[16]; if (tdb == NULL) { tcpstat_inc(tcps_rcvbadsig); return (-1); } if (tcp_signature(tdb, tp->pf, m, th, iphlen, 1, sig) < 0) return (-1); if (timingsafe_bcmp(sig, sigp, 16)) { tcpstat_inc(tcps_rcvbadsig); return (-1); } tcpstat_inc(tcps_rcvgoodsig); } #endif /* TCP_SIGNATURE */ return (0); } u_long tcp_seq_subtract(u_long a, u_long b) { return ((long)(a - b)); } /* * This function is called upon receipt of new valid data (while not in header * prediction mode), and it updates the ordered list of sacks. */ void tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_laststart, tcp_seq rcv_lastend) { /* * First reported block MUST be the most recent one. Subsequent * blocks SHOULD be in the order in which they arrived at the * receiver. These two conditions make the implementation fully * compliant with RFC 2018. */ int i, j = 0, count = 0, lastpos = -1; struct sackblk sack, firstsack, temp[MAX_SACK_BLKS]; /* First clean up current list of sacks */ for (i = 0; i < tp->rcv_numsacks; i++) { sack = tp->sackblks[i]; if (sack.start == 0 && sack.end == 0) { count++; /* count = number of blocks to be discarded */ continue; } if (SEQ_LEQ(sack.end, tp->rcv_nxt)) { tp->sackblks[i].start = tp->sackblks[i].end = 0; count++; } else { temp[j].start = tp->sackblks[i].start; temp[j++].end = tp->sackblks[i].end; } } tp->rcv_numsacks -= count; if (tp->rcv_numsacks == 0) { /* no sack blocks currently (fast path) */ tcp_clean_sackreport(tp); if (SEQ_LT(tp->rcv_nxt, rcv_laststart)) { /* ==> need first sack block */ tp->sackblks[0].start = rcv_laststart; tp->sackblks[0].end = rcv_lastend; tp->rcv_numsacks = 1; } return; } /* Otherwise, sack blocks are already present. 
*/ for (i = 0; i < tp->rcv_numsacks; i++) tp->sackblks[i] = temp[i]; /* first copy back sack list */ if (SEQ_GEQ(tp->rcv_nxt, rcv_lastend)) return; /* sack list remains unchanged */ /* * From here, segment just received should be (part of) the 1st sack. * Go through list, possibly coalescing sack block entries. */ firstsack.start = rcv_laststart; firstsack.end = rcv_lastend; for (i = 0; i < tp->rcv_numsacks; i++) { sack = tp->sackblks[i]; if (SEQ_LT(sack.end, firstsack.start) || SEQ_GT(sack.start, firstsack.end)) continue; /* no overlap */ if (sack.start == firstsack.start && sack.end == firstsack.end){ /* * identical block; delete it here since we will * move it to the front of the list. */ tp->sackblks[i].start = tp->sackblks[i].end = 0; lastpos = i; /* last posn with a zero entry */ continue; } if (SEQ_LEQ(sack.start, firstsack.start)) firstsack.start = sack.start; /* merge blocks */ if (SEQ_GEQ(sack.end, firstsack.end)) firstsack.end = sack.end; /* merge blocks */ tp->sackblks[i].start = tp->sackblks[i].end = 0; lastpos = i; /* last posn with a zero entry */ } if (lastpos != -1) { /* at least one merge */ for (i = 0, j = 1; i < tp->rcv_numsacks; i++) { sack = tp->sackblks[i]; if (sack.start == 0 && sack.end == 0) continue; temp[j++] = sack; } tp->rcv_numsacks = j; /* including first blk (added later) */ for (i = 1; i < tp->rcv_numsacks; i++) /* now copy back */ tp->sackblks[i] = temp[i]; } else { /* no merges -- shift sacks by 1 */ if (tp->rcv_numsacks < MAX_SACK_BLKS) tp->rcv_numsacks++; for (i = tp->rcv_numsacks-1; i > 0; i--) tp->sackblks[i] = tp->sackblks[i-1]; } tp->sackblks[0] = firstsack; return; } /* * Process the TCP SACK option. tp->snd_holes is an ordered list * of holes (oldest to newest, in terms of the sequence space). */ void tcp_sack_option(struct tcpcb *tp, struct tcphdr *th, u_char *cp, int optlen) { int tmp_olen; u_char *tmp_cp; struct sackhole *cur, *p, *temp; if (!tp->sack_enable) return; /* SACK without ACK doesn't make sense. 
*/ if ((th->th_flags & TH_ACK) == 0) return; /* Make sure the ACK on this segment is in [snd_una, snd_max]. */ if (SEQ_LT(th->th_ack, tp->snd_una) || SEQ_GT(th->th_ack, tp->snd_max)) return; /* Note: TCPOLEN_SACK must be 2*sizeof(tcp_seq) */ if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) return; /* Note: TCPOLEN_SACK must be 2*sizeof(tcp_seq) */ tmp_cp = cp + 2; tmp_olen = optlen - 2; tcpstat_inc(tcps_sack_rcv_opts); if (tp->snd_numholes < 0) tp->snd_numholes = 0; if (tp->t_maxseg == 0) panic("tcp_sack_option"); /* Should never happen */ while (tmp_olen > 0) { struct sackblk sack; memcpy(&sack.start, tmp_cp, sizeof(tcp_seq)); sack.start = ntohl(sack.start); memcpy(&sack.end, tmp_cp + sizeof(tcp_seq), sizeof(tcp_seq)); sack.end = ntohl(sack.end); tmp_olen -= TCPOLEN_SACK; tmp_cp += TCPOLEN_SACK; if (SEQ_LEQ(sack.end, sack.start)) continue; /* bad SACK fields */ if (SEQ_LEQ(sack.end, tp->snd_una)) continue; /* old block */ if (SEQ_GT(th->th_ack, tp->snd_una)) { if (SEQ_LT(sack.start, th->th_ack)) continue; } if (SEQ_GT(sack.end, tp->snd_max)) continue; if (tp->snd_holes == NULL) { /* first hole */ tp->snd_holes = (struct sackhole *) pool_get(&sackhl_pool, PR_NOWAIT); if (tp->snd_holes == NULL) { /* ENOBUFS, so ignore SACKed block for now*/ goto done; } cur = tp->snd_holes; cur->start = th->th_ack; cur->end = sack.start; cur->rxmit = cur->start; cur->next = NULL; tp->snd_numholes = 1; tp->rcv_lastsack = sack.end; /* * dups is at least one. If more data has been * SACKed, it can be greater than one. 
*/ cur->dups = min(tcprexmtthresh, ((sack.end - cur->end)/tp->t_maxseg)); if (cur->dups < 1) cur->dups = 1; continue; /* with next sack block */ } /* Go thru list of holes: p = previous, cur = current */ p = cur = tp->snd_holes; while (cur) { if (SEQ_LEQ(sack.end, cur->start)) /* SACKs data before the current hole */ break; /* no use going through more holes */ if (SEQ_GEQ(sack.start, cur->end)) { /* SACKs data beyond the current hole */ cur->dups++; if (((sack.end - cur->end)/tp->t_maxseg) >= tcprexmtthresh) cur->dups = tcprexmtthresh; p = cur; cur = cur->next; continue; } if (SEQ_LEQ(sack.start, cur->start)) { /* Data acks at least the beginning of hole */ if (SEQ_GEQ(sack.end, cur->end)) { /* Acks entire hole, so delete hole */ if (p != cur) { p->next = cur->next; pool_put(&sackhl_pool, cur); cur = p->next; } else { cur = cur->next; pool_put(&sackhl_pool, p); p = cur; tp->snd_holes = p; } tp->snd_numholes--; continue; } /* otherwise, move start of hole forward */ cur->start = sack.end; cur->rxmit = SEQ_MAX(cur->rxmit, cur->start); p = cur; cur = cur->next; continue; } /* move end of hole backward */ if (SEQ_GEQ(sack.end, cur->end)) { cur->end = sack.start; cur->rxmit = SEQ_MIN(cur->rxmit, cur->end); cur->dups++; if (((sack.end - cur->end)/tp->t_maxseg) >= tcprexmtthresh) cur->dups = tcprexmtthresh; p = cur; cur = cur->next; continue; } if (SEQ_LT(cur->start, sack.start) && SEQ_GT(cur->end, sack.end)) { /* * ACKs some data in middle of a hole; need to * split current hole */ temp = (struct sackhole *) pool_get(&sackhl_pool, PR_NOWAIT); if (temp == NULL) goto done; /* ENOBUFS */ temp->next = cur->next; temp->start = sack.end; temp->end = cur->end; temp->dups = cur->dups; temp->rxmit = SEQ_MAX(cur->rxmit, temp->start); cur->end = sack.start; cur->rxmit = SEQ_MIN(cur->rxmit, cur->end); cur->dups++; if (((sack.end - cur->end)/tp->t_maxseg) >= tcprexmtthresh) cur->dups = tcprexmtthresh; cur->next = temp; p = temp; cur = p->next; tp->snd_numholes++; } } /* At this 
point, p points to the last hole on the list */
		if (SEQ_LT(tp->rcv_lastsack, sack.start)) {
			/*
			 * Need to append new hole at end.
			 * Last hole is p (and it's not NULL).
			 * NOTE(review): nothing visible here caps
			 * tp->snd_numholes other than pool exhaustion;
			 * presumably a peer can grow the hole list one entry
			 * per SACK option -- confirm against a global
			 * sackhole limit upstream.
			 */
			temp = (struct sackhole *)
			    pool_get(&sackhl_pool, PR_NOWAIT);
			if (temp == NULL)
				goto done; /* ENOBUFS */
			temp->start = tp->rcv_lastsack;
			temp->end = sack.start;
			/* Seed the dup count from the amount of data SACKed. */
			temp->dups = min(tcprexmtthresh,
			    ((sack.end - sack.start)/tp->t_maxseg));
			if (temp->dups < 1)
				temp->dups = 1;
			temp->rxmit = temp->start;
			temp->next = 0;
			p->next = temp;
			tp->rcv_lastsack = sack.end;
			tp->snd_numholes++;
		}
	}
done:
	return;
}

/*
 * Delete stale (i.e, cumulatively ack'd) holes.  Hole is deleted only if
 * it is completely acked; otherwise, tcp_sack_option(), called from
 * tcp_dooptions(), will fix up the hole.
 */
void
tcp_del_sackholes(struct tcpcb *tp, struct tcphdr *th)
{
	if (tp->sack_enable && tp->t_state != TCPS_LISTEN) {
		/* max because this could be an older ack just arrived */
		tcp_seq lastack = SEQ_GT(th->th_ack, tp->snd_una) ?
			th->th_ack : tp->snd_una;
		struct sackhole *cur = tp->snd_holes;
		struct sackhole *prev;

		/* Free fully-acked holes; trim the first partially-acked one. */
		while (cur)
			if (SEQ_LEQ(cur->end, lastack)) {
				prev = cur;
				cur = cur->next;
				pool_put(&sackhl_pool, prev);
				tp->snd_numholes--;
			} else if (SEQ_LT(cur->start, lastack)) {
				cur->start = lastack;
				if (SEQ_LT(cur->rxmit, cur->start))
					cur->rxmit = cur->start;
				break;
			} else
				break;
		tp->snd_holes = cur;
	}
}

/*
 * Delete all receiver-side SACK information.
 */
void
tcp_clean_sackreport(struct tcpcb *tp)
{
	int i;

	tp->rcv_numsacks = 0;
	for (i = 0; i < MAX_SACK_BLKS; i++)
		tp->sackblks[i].start = tp->sackblks[i].end = 0;
}

/*
 * Partial ack handling within a sack recovery episode.  When a partial ack
 * arrives, turn off retransmission timer, deflate the window, do not clear
 * tp->t_dupacks.
 */
void
tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
{
	/* Turn off retx. timer (will start again next segment) */
	TCP_TIMER_DISARM(tp, TCPT_REXMT);
	tp->t_rtttime = 0;

	/*
	 * Partial window deflation.
	 * This statement relies on the
	 * fact that tp->snd_una has not been updated yet.
	 */
	if (tp->snd_cwnd > (th->th_ack - tp->snd_una)) {
		tp->snd_cwnd -= th->th_ack - tp->snd_una;
		tp->snd_cwnd += tp->t_maxseg;
	} else
		tp->snd_cwnd = tp->t_maxseg;
	tp->snd_cwnd += tp->t_maxseg;
	tp->t_flags |= TF_NEEDOUTPUT;
}

/*
 * Pull out of band byte out of a segment so
 * it doesn't appear in the user's data queue.
 * It is still reflected in the segment length for
 * sequencing purposes.
 */
void
tcp_pulloutofband(struct socket *so, u_int urgent, struct mbuf *m, int off)
{
	/* Offset of the urgent byte within the mbuf chain. */
	int cnt = off + urgent - 1;

	while (cnt >= 0) {
		if (m->m_len > cnt) {
			char *cp = mtod(m, caddr_t) + cnt;
			struct tcpcb *tp = sototcpcb(so);

			/* Stash the OOB byte, then splice it out in place. */
			tp->t_iobc = *cp;
			tp->t_oobflags |= TCPOOB_HAVEDATA;
			memmove(cp, cp + 1, m->m_len - cnt - 1);
			m->m_len--;
			return;
		}
		cnt -= m->m_len;
		m = m->m_next;
		if (m == NULL)
			break;
	}
	/* Caller must guarantee the urgent byte lies within the chain. */
	panic("tcp_pulloutofband");
}

/*
 * Collect new round-trip time estimate
 * and update averages and current timeout.
 */
void
tcp_xmit_timer(struct tcpcb *tp, int rtt)
{
	short delta;
	short rttmin;

	/* Clamp the sample to [0, TCP_RTT_MAX]. */
	if (rtt < 0)
		rtt = 0;
	else if (rtt > TCP_RTT_MAX)
		rtt = TCP_RTT_MAX;

	tcpstat_inc(tcps_rttupdated);
	if (tp->t_srtt != 0) {
		/*
		 * delta is fixed point with 2 (TCP_RTT_BASE_SHIFT) bits
		 * after the binary point (scaled by 4), whereas
		 * srtt is stored as fixed point with 5 bits after the
		 * binary point (i.e., scaled by 32).  The following magic
		 * is equivalent to the smoothing algorithm in rfc793 with
		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
		 * point).
		 */
		delta = (rtt << TCP_RTT_BASE_SHIFT) -
		    (tp->t_srtt >> TCP_RTT_SHIFT);
		if ((tp->t_srtt += delta) <= 0)
			tp->t_srtt = 1 << TCP_RTT_BASE_SHIFT;
		/*
		 * We accumulate a smoothed rtt variance (actually, a
		 * smoothed mean difference), then set the retransmit
		 * timer to smoothed rtt + 4 times the smoothed variance.
		 * rttvar is stored as fixed point with 4 bits after the
		 * binary point (scaled by 16).
The following is * equivalent to rfc793 smoothing with an alpha of .75 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces * rfc793's wired-in beta. */ if (delta < 0) delta = -delta; delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT); if ((tp->t_rttvar += delta) <= 0) tp->t_rttvar = 1 << TCP_RTT_BASE_SHIFT; } else { /* * No rtt measurement yet - use the unsmoothed rtt. * Set the variance to half the rtt (so our first * retransmit happens at 3*rtt). */ tp->t_srtt = (rtt + 1) << (TCP_RTT_SHIFT + TCP_RTT_BASE_SHIFT); tp->t_rttvar = (rtt + 1) << (TCP_RTTVAR_SHIFT + TCP_RTT_BASE_SHIFT - 1); } tp->t_rtttime = 0; tp->t_rxtshift = 0; /* * the retransmit should happen at rtt + 4 * rttvar. * Because of the way we do the smoothing, srtt and rttvar * will each average +1/2 tick of bias. When we compute * the retransmit timer, we want 1/2 tick of rounding and * 1 extra tick because of +-1/2 tick uncertainty in the * firing of the timer. The bias will give us exactly the * 1.5 tick we need. But, because the bias is * statistical, we have to test that we don't drop below * the minimum feasible timer (which is 2 ticks). */ rttmin = min(max(rtt + 2, tp->t_rttmin), TCPTV_REXMTMAX); TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), rttmin, TCPTV_REXMTMAX); /* * We received an ack for a packet that wasn't retransmitted; * it is probably safe to discard any error indications we've * received recently. This isn't quite right, but close enough * for now (a route might have failed after we sent a segment, * and the return path might not be symmetrical). */ tp->t_softerror = 0; } /* * Determine a reasonable value for maxseg size. * If the route is known, check route for mtu. * If none, use an mss that can be handled on the outgoing * interface without forcing IP to fragment; if bigger than * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES * to utilize large mbufs. 
If no route is found, route has no mtu, * or the destination isn't local, use a default, hopefully conservative * size (usually 512 or the default IP max size, but no more than the mtu * of the interface), as we can't discover anything about intervening * gateways or networks. We also initialize the congestion/slow start * window to be a single segment if the destination isn't local. * While looking at the routing entry, we also initialize other path-dependent * parameters from pre-set or cached values in the routing entry. * * Also take into account the space needed for options that we * send regularly. Make maxseg shorter by that amount to assure * that we can send maxseg amount of data even when the options * are present. Store the upper limit of the length of options plus * data in maxopd. * * NOTE: offer == -1 indicates that the maxseg size changed due to * Path MTU discovery. */ int tcp_mss(struct tcpcb *tp, int offer) { struct rtentry *rt; struct ifnet *ifp = NULL; int mss, mssopt; int iphlen; struct inpcb *inp; inp = tp->t_inpcb; mssopt = mss = tcp_mssdflt; rt = in_pcbrtentry(inp); if (rt == NULL) goto out; ifp = if_get(rt->rt_ifidx); if (ifp == NULL) goto out; switch (tp->pf) { #ifdef INET6 case AF_INET6: iphlen = sizeof(struct ip6_hdr); break; #endif case AF_INET: iphlen = sizeof(struct ip); break; default: /* the family does not support path MTU discovery */ goto out; } /* * if there's an mtu associated with the route and we support * path MTU discovery for the underlying protocol family, use it. */ if (rt->rt_mtu) { /* * One may wish to lower MSS to take into account options, * especially security-related options. */ if (tp->pf == AF_INET6 && rt->rt_mtu < IPV6_MMTU) { /* * RFC2460 section 5, last paragraph: if path MTU is * smaller than 1280, use 1280 as packet size and * attach fragment header. 
*/ mss = IPV6_MMTU - iphlen - sizeof(struct ip6_frag) - sizeof(struct tcphdr); } else { mss = rt->rt_mtu - iphlen - sizeof(struct tcphdr); } } else if (ifp->if_flags & IFF_LOOPBACK) { mss = ifp->if_mtu - iphlen - sizeof(struct tcphdr); } else if (tp->pf == AF_INET) { if (ip_mtudisc) mss = ifp->if_mtu - iphlen - sizeof(struct tcphdr); } #ifdef INET6 else if (tp->pf == AF_INET6) { /* * for IPv6, path MTU discovery is always turned on, * or the node must use packet size <= 1280. */ mss = ifp->if_mtu - iphlen - sizeof(struct tcphdr); } #endif /* INET6 */ /* Calculate the value that we offer in TCPOPT_MAXSEG */ if (offer != -1) { mssopt = ifp->if_mtu - iphlen - sizeof(struct tcphdr); mssopt = max(tcp_mssdflt, mssopt); } out: if_put(ifp); /* * The current mss, t_maxseg, is initialized to the default value. * If we compute a smaller value, reduce the current mss. * If we compute a larger value, return it for use in sending * a max seg size option, but don't store it for use * unless we received an offer at least that large from peer. * * However, do not accept offers lower than the minimum of * the interface MTU and 216. */ if (offer > 0) tp->t_peermss = offer; if (tp->t_peermss) mss = min(mss, max(tp->t_peermss, 216)); /* sanity - at least max opt. space */ mss = max(mss, 64); /* * maxopd stores the maximum length of data AND options * in a segment; maxseg is the amount of data in a normal * segment. We need to store this value (maxopd) apart * from maxseg, because now every segment carries options * and thus we normally have somewhat less data in segments. 
 */
	tp->t_maxopd = mss;

	/* Account for options we send on (nearly) every segment. */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
		mss -= TCPOLEN_TSTAMP_APPA;
#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE)
		mss -= TCPOLEN_SIGLEN;
#endif

	if (offer == -1) {
		/* mss changed due to Path MTU discovery */
		tp->t_flags &= ~TF_PMTUD_PEND;
		tp->t_pmtud_mtu_sent = 0;
		tp->t_pmtud_mss_acked = 0;
		if (mss < tp->t_maxseg) {
			/*
			 * Follow suggestion in RFC 2414 to reduce the
			 * congestion window by the ratio of the old
			 * segment size to the new segment size.
			 */
			tp->snd_cwnd = ulmax((tp->snd_cwnd / tp->t_maxseg) *
			    mss, mss);
		}
	} else if (tcp_do_rfc3390 == 2) {
		/* increase initial window  */
		tp->snd_cwnd = ulmin(10 * mss, ulmax(2 * mss, 14600));
	} else if (tcp_do_rfc3390) {
		/* increase initial window  */
		tp->snd_cwnd = ulmin(4 * mss, ulmax(2 * mss, 4380));
	} else
		tp->snd_cwnd = mss;

	tp->t_maxseg = mss;

	/* When called for PMTU discovery (offer == -1), hand back the
	 * advertised-MSS option value; otherwise the effective mss. */
	return (offer != -1 ? mssopt : mss);
}

/*
 * Size of the TCP/IP header for this connection's address family,
 * including the options currently in use.
 */
u_int
tcp_hdrsz(struct tcpcb *tp)
{
	u_int hlen;

	switch (tp->pf) {
#ifdef INET6
	case AF_INET6:
		hlen = sizeof(struct ip6_hdr);
		break;
#endif
	case AF_INET:
		hlen = sizeof(struct ip);
		break;
	default:
		hlen = 0;
		break;
	}
	hlen += sizeof(struct tcphdr);

	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
	    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
		hlen += TCPOLEN_TSTAMP_APPA;
#ifdef TCP_SIGNATURE
	if (tp->t_flags & TF_SIGNATURE)
		hlen += TCPOLEN_SIGLEN;
#endif
	return (hlen);
}

/*
 * Set connection variables based on the effective MSS.
 * We are passed the TCPCB for the actual connection.  If we
 * are the server, we are called by the compressed state engine
 * when the 3-way handshake is complete.  If we are the client,
 * we are called when we receive the SYN,ACK from the server.
 *
 * NOTE: The t_maxseg value must be initialized in the TCPCB
 * before this routine is called!
 */
void
tcp_mss_update(struct tcpcb *tp)
{
	int mss;
	u_long bufsize;
	struct rtentry *rt;
	struct socket *so;

	so = tp->t_inpcb->inp_socket;
	mss = tp->t_maxseg;

	rt = in_pcbrtentry(tp->t_inpcb);

	if (rt == NULL)
		return;

	/* Shrink mss to the send buffer, or round the buffer up to mss. */
	bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss) {
		mss = bufsize;
		/* Update t_maxseg and t_maxopd */
		tcp_mss(tp, mss);
	} else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void)sbreserve(so, &so->so_snd, bufsize);
	}

	/* Round the receive buffer up to a multiple of mss as well. */
	bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		(void)sbreserve(so, &so->so_rcv, bufsize);
	}
}

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to ti_ack, this forces retransmission timer
 * to be started again.
 */
void
tcp_newreno_partialack(struct tcpcb *tp, struct tcphdr *th)
{
	/*
	 * snd_una has not been updated and the socket send buffer
	 * not yet drained of the acked data, so we have to leave
	 * snd_una as it was to get the correct data offset in
	 * tcp_output().
	 */
	tcp_seq onxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;

	TCP_TIMER_DISARM(tp, TCPT_REXMT);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset
	 * (tp->snd_una not yet updated when this function is called)
	 */
	tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
	(void)tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on fact that tp->snd_una
	 * not updated yet.
	 */
	if (tp->snd_cwnd > th->th_ack - tp->snd_una)
		tp->snd_cwnd -= th->th_ack - tp->snd_una;
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += tp->t_maxseg;
}

/*
 * Compute the MSS to advertise, from the MTU of the interface the
 * segment arrived on (when known), floored at tcp_mssdflt.
 */
int
tcp_mss_adv(struct mbuf *m, int af)
{
	int mss = 0;
	int iphlen;
	struct ifnet *ifp = NULL;

	if (m && (m->m_flags & M_PKTHDR))
		ifp = if_get(m->m_pkthdr.ph_ifidx);

	switch (af) {
	case AF_INET:
		if (ifp != NULL)
			mss = ifp->if_mtu;
		iphlen = sizeof(struct ip);
		break;
#ifdef INET6
	case AF_INET6:
		if (ifp != NULL)
			mss = ifp->if_mtu;
		iphlen = sizeof(struct ip6_hdr);
		break;
#endif
	default:
		unhandled_af(af);
	}
	if_put(ifp);
	mss = mss - iphlen - sizeof(struct tcphdr);
	return (max(mss, tcp_mssdflt));
}

/*
 * TCP compressed state engine.  Currently used to hold compressed
 * state for SYN_RECEIVED.
 */

/* syn hash parameters */
int	tcp_syn_hash_size = TCP_SYN_HASH_SIZE;
int	tcp_syn_cache_limit = TCP_SYN_HASH_SIZE*TCP_SYN_BUCKET_SIZE;
int	tcp_syn_bucket_limit = 3*TCP_SYN_BUCKET_SIZE;
/* Insertions served by one set of hash secrets before reseeding. */
int	tcp_syn_use_limit = 100000;

struct syn_cache_set tcp_syn_cache[2];
int tcp_syn_cache_active;

/*
 * Hash a (src addr, src port, dst port) tuple, keyed with per-set random
 * state so bucket placement is not externally predictable.
 */
#define SYN_HASH(sa, sp, dp, rand) \
	(((sa)->s_addr ^ (rand)[0]) *				\
	(((((u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))
#ifndef INET6
#define	SYN_HASHALL(hash, src, dst, rand) \
do {									\
	hash = SYN_HASH(&satosin(src)->sin_addr,			\
		satosin(src)->sin_port,					\
		satosin(dst)->sin_port, (rand));			\
} while (/*CONSTCOND*/ 0)
#else
#define SYN_HASH6(sa, sp, dp, rand) \
	(((sa)->s6_addr32[0] ^ (rand)[0]) *			\
	((sa)->s6_addr32[1] ^ (rand)[1]) *			\
	((sa)->s6_addr32[2] ^ (rand)[2]) *			\
	((sa)->s6_addr32[3] ^ (rand)[3]) *			\
	(((((u_int32_t)(dp))<<16) + ((u_int32_t)(sp))) ^ (rand)[4]))

/* Dispatch on address family; hash = 0 for unknown families. */
#define SYN_HASHALL(hash, src, dst, rand) \
do {									\
	switch ((src)->sa_family) {					\
	case AF_INET:							\
		hash = SYN_HASH(&satosin(src)->sin_addr,		\
			satosin(src)->sin_port,				\
			satosin(dst)->sin_port, (rand));		\
		break;							\
	case AF_INET6:							\
		hash = SYN_HASH6(&satosin6(src)->sin6_addr,		\
			satosin6(src)->sin6_port,			\
			satosin6(dst)->sin6_port, (rand));		\
		break;							\
	default:							\
		hash = 0;						\
	}								\
} while (/*CONSTCOND*/0)
#endif /* INET6 */ void syn_cache_rm(struct syn_cache *sc) { sc->sc_flags |= SCF_DEAD; TAILQ_REMOVE(&sc->sc_buckethead->sch_bucket, sc, sc_bucketq); sc->sc_tp = NULL; LIST_REMOVE(sc, sc_tpq); sc->sc_buckethead->sch_length--; timeout_del(&sc->sc_timer); sc->sc_set->scs_count--; } void syn_cache_put(struct syn_cache *sc) { m_free(sc->sc_ipopts); if (sc->sc_route4.ro_rt != NULL) { rtfree(sc->sc_route4.ro_rt); sc->sc_route4.ro_rt = NULL; } timeout_set(&sc->sc_timer, syn_cache_reaper, sc); timeout_add(&sc->sc_timer, 0); } struct pool syn_cache_pool; /* * We don't estimate RTT with SYNs, so each packet starts with the default * RTT and each timer step has a fixed timeout value. */ #define SYN_CACHE_TIMER_ARM(sc) \ do { \ TCPT_RANGESET((sc)->sc_rxtcur, \ TCPTV_SRTTDFLT * tcp_backoff[(sc)->sc_rxtshift], TCPTV_MIN, \ TCPTV_REXMTMAX); \ if (!timeout_initialized(&(sc)->sc_timer)) \ timeout_set_proc(&(sc)->sc_timer, syn_cache_timer, (sc)); \ timeout_add(&(sc)->sc_timer, (sc)->sc_rxtcur * (hz / PR_SLOWHZ)); \ } while (/*CONSTCOND*/0) #define SYN_CACHE_TIMESTAMP(sc) tcp_now + (sc)->sc_modulate void syn_cache_init(void) { int i; /* Initialize the hash buckets. */ tcp_syn_cache[0].scs_buckethead = mallocarray(tcp_syn_hash_size, sizeof(struct syn_cache_head), M_SYNCACHE, M_WAITOK|M_ZERO); tcp_syn_cache[1].scs_buckethead = mallocarray(tcp_syn_hash_size, sizeof(struct syn_cache_head), M_SYNCACHE, M_WAITOK|M_ZERO); tcp_syn_cache[0].scs_size = tcp_syn_hash_size; tcp_syn_cache[1].scs_size = tcp_syn_hash_size; for (i = 0; i < tcp_syn_hash_size; i++) { TAILQ_INIT(&tcp_syn_cache[0].scs_buckethead[i].sch_bucket); TAILQ_INIT(&tcp_syn_cache[1].scs_buckethead[i].sch_bucket); } /* Initialize the syn cache pool. 
*/ pool_init(&syn_cache_pool, sizeof(struct syn_cache), 0, IPL_SOFTNET, 0, "syncache", NULL); } void syn_cache_insert(struct syn_cache *sc, struct tcpcb *tp) { struct syn_cache_set *set = &tcp_syn_cache[tcp_syn_cache_active]; struct syn_cache_head *scp; struct syn_cache *sc2; int i; NET_ASSERT_LOCKED(); /* * If there are no entries in the hash table, reinitialize * the hash secrets. To avoid useless cache swaps and * reinitialization, use it until the limit is reached. * An emtpy cache is also the oportunity to resize the hash. */ if (set->scs_count == 0 && set->scs_use <= 0) { set->scs_use = tcp_syn_use_limit; if (set->scs_size != tcp_syn_hash_size) { scp = mallocarray(tcp_syn_hash_size, sizeof(struct syn_cache_head), M_SYNCACHE, M_NOWAIT|M_ZERO); if (scp == NULL) { /* Try again next time. */ set->scs_use = 0; } else { free(set->scs_buckethead, M_SYNCACHE, set->scs_size * sizeof(struct syn_cache_head)); set->scs_buckethead = scp; set->scs_size = tcp_syn_hash_size; for (i = 0; i < tcp_syn_hash_size; i++) TAILQ_INIT(&scp[i].sch_bucket); } } arc4random_buf(set->scs_random, sizeof(set->scs_random)); tcpstat_inc(tcps_sc_seedrandom); } SYN_HASHALL(sc->sc_hash, &sc->sc_src.sa, &sc->sc_dst.sa, set->scs_random); scp = &set->scs_buckethead[sc->sc_hash % set->scs_size]; sc->sc_buckethead = scp; /* * Make sure that we don't overflow the per-bucket * limit or the total cache size limit. */ if (scp->sch_length >= tcp_syn_bucket_limit) { tcpstat_inc(tcps_sc_bucketoverflow); /* * Someone might attack our bucket hash function. Reseed * with random as soon as the passive syn cache gets empty. */ set->scs_use = 0; /* * The bucket is full. Toss the oldest element in the * bucket. This will be the first entry in the bucket. */ sc2 = TAILQ_FIRST(&scp->sch_bucket); #ifdef DIAGNOSTIC /* * This should never happen; we should always find an * entry in our bucket. 
*/ if (sc2 == NULL) panic("%s: bucketoverflow: impossible", __func__); #endif syn_cache_rm(sc2); syn_cache_put(sc2); } else if (set->scs_count >= tcp_syn_cache_limit) { struct syn_cache_head *scp2, *sce; tcpstat_inc(tcps_sc_overflowed); /* * The cache is full. Toss the oldest entry in the * first non-empty bucket we can find. * * XXX We would really like to toss the oldest * entry in the cache, but we hope that this * condition doesn't happen very often. */ scp2 = scp; if (TAILQ_EMPTY(&scp2->sch_bucket)) { sce = &set->scs_buckethead[set->scs_size]; for (++scp2; scp2 != scp; scp2++) { if (scp2 >= sce) scp2 = &set->scs_buckethead[0]; if (! TAILQ_EMPTY(&scp2->sch_bucket)) break; } #ifdef DIAGNOSTIC /* * This should never happen; we should always find a * non-empty bucket. */ if (scp2 == scp) panic("%s: cacheoverflow: impossible", __func__); #endif } sc2 = TAILQ_FIRST(&scp2->sch_bucket); syn_cache_rm(sc2); syn_cache_put(sc2); } /* * Initialize the entry's timer. */ sc->sc_rxttot = 0; sc->sc_rxtshift = 0; SYN_CACHE_TIMER_ARM(sc); /* Link it from tcpcb entry */ LIST_INSERT_HEAD(&tp->t_sc, sc, sc_tpq); /* Put it into the bucket. */ TAILQ_INSERT_TAIL(&scp->sch_bucket, sc, sc_bucketq); scp->sch_length++; sc->sc_set = set; set->scs_count++; set->scs_use--; tcpstat_inc(tcps_sc_added); /* * If the active cache has exceeded its use limit and * the passive syn cache is empty, exchange their roles. */ if (set->scs_use <= 0 && tcp_syn_cache[!tcp_syn_cache_active].scs_count == 0) tcp_syn_cache_active = !tcp_syn_cache_active; } /* * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted. * If we have retransmitted an entry the maximum number of times, expire * that entry. */ void syn_cache_timer(void *arg) { struct syn_cache *sc = arg; NET_LOCK(); if (sc->sc_flags & SCF_DEAD) goto out; if (__predict_false(sc->sc_rxtshift == TCP_MAXRXTSHIFT)) { /* Drop it -- too many retransmissions. 
*/ goto dropit; } /* * Compute the total amount of time this entry has * been on a queue. If this entry has been on longer * than the keep alive timer would allow, expire it. */ sc->sc_rxttot += sc->sc_rxtcur; if (sc->sc_rxttot >= tcptv_keep_init) goto dropit; tcpstat_inc(tcps_sc_retransmitted); (void) syn_cache_respond(sc, NULL); /* Advance the timer back-off. */ sc->sc_rxtshift++; SYN_CACHE_TIMER_ARM(sc); out: NET_UNLOCK(); return; dropit: tcpstat_inc(tcps_sc_timed_out); syn_cache_rm(sc); syn_cache_put(sc); NET_UNLOCK(); } void syn_cache_reaper(void *arg) { struct syn_cache *sc = arg; pool_put(&syn_cache_pool, (sc)); return; } /* * Remove syn cache created by the specified tcb entry, * because this does not make sense to keep them * (if there's no tcb entry, syn cache entry will never be used) */ void syn_cache_cleanup(struct tcpcb *tp) { struct syn_cache *sc, *nsc; NET_ASSERT_LOCKED(); LIST_FOREACH_SAFE(sc, &tp->t_sc, sc_tpq, nsc) { #ifdef DIAGNOSTIC if (sc->sc_tp != tp) panic("invalid sc_tp in syn_cache_cleanup"); #endif syn_cache_rm(sc); syn_cache_put(sc); } /* just for safety */ LIST_INIT(&tp->t_sc); } /* * Find an entry in the syn cache. */ struct syn_cache * syn_cache_lookup(struct sockaddr *src, struct sockaddr *dst, struct syn_cache_head **headp, u_int rtableid) { struct syn_cache_set *sets[2]; struct syn_cache *sc; struct syn_cache_head *scp; u_int32_t hash; int i; NET_ASSERT_LOCKED(); /* Check the active cache first, the passive cache is likely emtpy. 
*/ sets[0] = &tcp_syn_cache[tcp_syn_cache_active]; sets[1] = &tcp_syn_cache[!tcp_syn_cache_active]; for (i = 0; i < 2; i++) { if (sets[i]->scs_count == 0) continue; SYN_HASHALL(hash, src, dst, sets[i]->scs_random); scp = &sets[i]->scs_buckethead[hash % sets[i]->scs_size]; *headp = scp; TAILQ_FOREACH(sc, &scp->sch_bucket, sc_bucketq) { if (sc->sc_hash != hash) continue; if (!bcmp(&sc->sc_src, src, src->sa_len) && !bcmp(&sc->sc_dst, dst, dst->sa_len) && rtable_l2(rtableid) == rtable_l2(sc->sc_rtableid)) return (sc); } } return (NULL); } /* * This function gets called when we receive an ACK for a * socket in the LISTEN state. We look up the connection * in the syn cache, and if its there, we pull it out of * the cache and turn it into a full-blown connection in * the SYN-RECEIVED state. * * The return values may not be immediately obvious, and their effects * can be subtle, so here they are: * * NULL SYN was not found in cache; caller should drop the * packet and send an RST. * * -1 We were unable to create the new connection, and are * aborting it. An ACK,RST is being sent to the peer * (unless we got screwey sequence numbners; see below), * because the 3-way handshake has been completed. Caller * should not free the mbuf, since we may be using it. If * we are not, we will free it. * * Otherwise, the return value is a pointer to the new socket * associated with the connection. */ struct socket * syn_cache_get(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th, u_int hlen, u_int tlen, struct socket *so, struct mbuf *m) { struct syn_cache *sc; struct syn_cache_head *scp; struct inpcb *inp, *oldinp; struct tcpcb *tp = NULL; struct mbuf *am; struct socket *oso; NET_ASSERT_LOCKED(); sc = syn_cache_lookup(src, dst, &scp, sotoinpcb(so)->inp_rtableid); if (sc == NULL) return (NULL); /* * Verify the sequence and ack numbers. Try getting the correct * response again. 
*/ if ((th->th_ack != sc->sc_iss + 1) || SEQ_LEQ(th->th_seq, sc->sc_irs) || SEQ_GT(th->th_seq, sc->sc_irs + 1 + sc->sc_win)) { (void) syn_cache_respond(sc, m); return ((struct socket *)(-1)); } /* Remove this cache entry */ syn_cache_rm(sc); /* * Ok, create the full blown connection, and set things up * as they would have been set up if we had created the * connection when the SYN arrived. If we can't create * the connection, abort it. */ oso = so; so = sonewconn(so, SS_ISCONNECTED); if (so == NULL) goto resetandabort; oldinp = sotoinpcb(oso); inp = sotoinpcb(so); #ifdef IPSEC /* * We need to copy the required security levels * from the old pcb. Ditto for any other * IPsec-related information. */ memcpy(inp->inp_seclevel, oldinp->inp_seclevel, sizeof(oldinp->inp_seclevel)); #endif /* IPSEC */ #ifdef INET6 /* * inp still has the OLD in_pcb stuff, set the * v6-related flags on the new guy, too. */ inp->inp_flags |= (oldinp->inp_flags & INP_IPV6); if (inp->inp_flags & INP_IPV6) { inp->inp_ipv6.ip6_hlim = oldinp->inp_ipv6.ip6_hlim; inp->inp_hops = oldinp->inp_hops; } else #endif /* INET6 */ { inp->inp_ip.ip_ttl = oldinp->inp_ip.ip_ttl; } #if NPF > 0 if (m->m_pkthdr.pf.flags & PF_TAG_DIVERTED) { struct pf_divert *divert; divert = pf_find_divert(m); KASSERT(divert != NULL); inp->inp_rtableid = divert->rdomain; } else #endif /* inherit rtable from listening socket */ inp->inp_rtableid = sc->sc_rtableid; inp->inp_lport = th->th_dport; switch (src->sa_family) { #ifdef INET6 case AF_INET6: inp->inp_laddr6 = satosin6(dst)->sin6_addr; break; #endif /* INET6 */ case AF_INET: inp->inp_laddr = satosin(dst)->sin_addr; inp->inp_options = ip_srcroute(m); if (inp->inp_options == NULL) { inp->inp_options = sc->sc_ipopts; sc->sc_ipopts = NULL; } break; } in_pcbrehash(inp); /* * Give the new socket our cached route reference. 
*/ if (src->sa_family == AF_INET) inp->inp_route = sc->sc_route4; /* struct assignment */ #ifdef INET6 else inp->inp_route6 = sc->sc_route6; #endif sc->sc_route4.ro_rt = NULL; am = m_get(M_DONTWAIT, MT_SONAME); /* XXX */ if (am == NULL) goto resetandabort; am->m_len = src->sa_len; memcpy(mtod(am, caddr_t), src, src->sa_len); if (in_pcbconnect(inp, am)) { (void) m_free(am); goto resetandabort; } (void) m_free(am); tp = intotcpcb(inp); tp->t_flags = sototcpcb(oso)->t_flags & (TF_NOPUSH|TF_NODELAY); if (sc->sc_request_r_scale != 15) { tp->requested_s_scale = sc->sc_requested_s_scale; tp->request_r_scale = sc->sc_request_r_scale; tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE; } if (sc->sc_flags & SCF_TIMESTAMP) tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP; tp->t_template = tcp_template(tp); if (tp->t_template == 0) { tp = tcp_drop(tp, ENOBUFS); /* destroys socket */ so = NULL; goto abort; } tp->sack_enable = sc->sc_flags & SCF_SACK_PERMIT; tp->ts_modulate = sc->sc_modulate; tp->ts_recent = sc->sc_timestamp; tp->iss = sc->sc_iss; tp->irs = sc->sc_irs; tcp_sendseqinit(tp); tp->snd_last = tp->snd_una; #ifdef TCP_ECN if (sc->sc_flags & SCF_ECN_PERMIT) { tp->t_flags |= TF_ECN_PERMIT; tcpstat_inc(tcps_ecn_accepts); } #endif if (sc->sc_flags & SCF_SACK_PERMIT) tp->t_flags |= TF_SACK_PERMIT; #ifdef TCP_SIGNATURE if (sc->sc_flags & SCF_SIGNATURE) tp->t_flags |= TF_SIGNATURE; #endif tcp_rcvseqinit(tp); tp->t_state = TCPS_SYN_RECEIVED; tp->t_rcvtime = tcp_now; TCP_TIMER_ARM(tp, TCPT_KEEP, tcptv_keep_init); tcpstat_inc(tcps_accepts); tcp_mss(tp, sc->sc_peermaxseg); /* sets t_maxseg */ if (sc->sc_peermaxseg) tcp_mss_update(tp); /* Reset initial window to 1 segment for retransmit */ if (sc->sc_rxtshift > 0) tp->snd_cwnd = tp->t_maxseg; tp->snd_wl1 = sc->sc_irs; tp->rcv_up = sc->sc_irs + 1; /* * This is what whould have happened in tcp_output() when * the SYN,ACK was sent. 
*/ tp->snd_up = tp->snd_una; tp->snd_max = tp->snd_nxt = tp->iss+1; TCP_TIMER_ARM(tp, TCPT_REXMT, tp->t_rxtcur); if (sc->sc_win > 0 && SEQ_GT(tp->rcv_nxt + sc->sc_win, tp->rcv_adv)) tp->rcv_adv = tp->rcv_nxt + sc->sc_win; tp->last_ack_sent = tp->rcv_nxt; tcpstat_inc(tcps_sc_completed); syn_cache_put(sc); return (so); resetandabort: tcp_respond(NULL, mtod(m, caddr_t), th, (tcp_seq)0, th->th_ack, TH_RST, m->m_pkthdr.ph_rtableid); abort: m_freem(m); if (so != NULL) (void) soabort(so); syn_cache_put(sc); tcpstat_inc(tcps_sc_aborted); return ((struct socket *)(-1)); } /* * This function is called when we get a RST for a * non-existent connection, so that we can see if the * connection is in the syn cache. If it is, zap it. */ void syn_cache_reset(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th, u_int rtableid) { struct syn_cache *sc; struct syn_cache_head *scp; NET_ASSERT_LOCKED(); if ((sc = syn_cache_lookup(src, dst, &scp, rtableid)) == NULL) return; if (SEQ_LT(th->th_seq, sc->sc_irs) || SEQ_GT(th->th_seq, sc->sc_irs + 1)) return; syn_cache_rm(sc); tcpstat_inc(tcps_sc_reset); syn_cache_put(sc); } void syn_cache_unreach(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th, u_int rtableid) { struct syn_cache *sc; struct syn_cache_head *scp; NET_ASSERT_LOCKED(); if ((sc = syn_cache_lookup(src, dst, &scp, rtableid)) == NULL) return; /* If the sequence number != sc_iss, then it's a bogus ICMP msg */ if (ntohl (th->th_seq) != sc->sc_iss) { return; } /* * If we've retransmitted 3 times and this is our second error, * we remove the entry. Otherwise, we allow it to continue on. * This prevents us from incorrectly nuking an entry during a * spurious network outage. * * See tcp_notify(). 
*/ if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtshift < 3) { sc->sc_flags |= SCF_UNREACH; return; } syn_cache_rm(sc); tcpstat_inc(tcps_sc_unreach); syn_cache_put(sc); } /* * Given a LISTEN socket and an inbound SYN request, add * this to the syn cache, and send back a segment: * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK> * to the source. * * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN. * Doing so would require that we hold onto the data and deliver it * to the application. However, if we are the target of a SYN-flood * DoS attack, an attacker could send data which would eventually * consume all available buffer space if it were ACKed. By not ACKing * the data, we avoid this DoS scenario. */ int syn_cache_add(struct sockaddr *src, struct sockaddr *dst, struct tcphdr *th, u_int iphlen, struct socket *so, struct mbuf *m, u_char *optp, int optlen, struct tcp_opt_info *oi, tcp_seq *issp) { struct tcpcb tb, *tp; long win; struct syn_cache *sc; struct syn_cache_head *scp; struct mbuf *ipopts; tp = sototcpcb(so); /* * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN * * Note this check is performed in tcp_input() very early on. */ /* * Initialize some local state. */ win = sbspace(so, &so->so_rcv); if (win > TCP_MAXWIN) win = TCP_MAXWIN; bzero(&tb, sizeof(tb)); #ifdef TCP_SIGNATURE if (optp || (tp->t_flags & TF_SIGNATURE)) { #else if (optp) { #endif tb.pf = tp->pf; tb.sack_enable = tp->sack_enable; tb.t_flags = tcp_do_rfc1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0; #ifdef TCP_SIGNATURE if (tp->t_flags & TF_SIGNATURE) tb.t_flags |= TF_SIGNATURE; #endif tb.t_state = TCPS_LISTEN; if (tcp_dooptions(&tb, optp, optlen, th, m, iphlen, oi, sotoinpcb(so)->inp_rtableid)) return (-1); } switch (src->sa_family) { case AF_INET: /* * Remember the IP options, if any. */ ipopts = ip_srcroute(m); break; default: ipopts = NULL; } /* * See if we already have an entry for this connection. * If we do, resend the SYN,ACK. 
We do not count this * as a retransmission (XXX though maybe we should). */ sc = syn_cache_lookup(src, dst, &scp, sotoinpcb(so)->inp_rtableid); if (sc != NULL) { tcpstat_inc(tcps_sc_dupesyn); if (ipopts) { /* * If we were remembering a previous source route, * forget it and use the new one we've been given. */ m_free(sc->sc_ipopts); sc->sc_ipopts = ipopts; } sc->sc_timestamp = tb.ts_recent; if (syn_cache_respond(sc, m) == 0) { tcpstat_inc(tcps_sndacks); tcpstat_inc(tcps_sndtotal); } return (0); } sc = pool_get(&syn_cache_pool, PR_NOWAIT|PR_ZERO); if (sc == NULL) { m_free(ipopts); return (-1); } /* * Fill in the cache, and put the necessary IP and TCP * options into the reply. */ memcpy(&sc->sc_src, src, src->sa_len); memcpy(&sc->sc_dst, dst, dst->sa_len); sc->sc_rtableid = sotoinpcb(so)->inp_rtableid; sc->sc_flags = 0; sc->sc_ipopts = ipopts; sc->sc_irs = th->th_seq; sc->sc_iss = issp ? *issp : arc4random(); sc->sc_peermaxseg = oi->maxseg; sc->sc_ourmaxseg = tcp_mss_adv(m, sc->sc_src.sa.sa_family); sc->sc_win = win; sc->sc_timestamp = tb.ts_recent; if ((tb.t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP)) == (TF_REQ_TSTMP|TF_RCVD_TSTMP)) { sc->sc_flags |= SCF_TIMESTAMP; sc->sc_modulate = arc4random(); } if ((tb.t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == (TF_RCVD_SCALE|TF_REQ_SCALE)) { sc->sc_requested_s_scale = tb.requested_s_scale; sc->sc_request_r_scale = 0; /* * Pick the smallest possible scaling factor that * will still allow us to scale up to sb_max. * * We do this because there are broken firewalls that * will corrupt the window scale option, leading to * the other endpoint believing that our advertised * window is unscaled. At scale factors larger than * 5 the unscaled window will drop below 1500 bytes, * leading to serious problems when traversing these * broken firewalls. * * With the default sbmax of 256K, a scale factor * of 3 will be chosen by this algorithm. Those who * choose a larger sbmax should watch out * for the compatiblity problems mentioned above. 
* * RFC1323: The Window field in a SYN (i.e., a <SYN> * or <SYN,ACK>) segment itself is never scaled. */ while (sc->sc_request_r_scale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << sc->sc_request_r_scale) < sb_max) sc->sc_request_r_scale++; } else { sc->sc_requested_s_scale = 15; sc->sc_request_r_scale = 15; } #ifdef TCP_ECN /* * if both ECE and CWR flag bits are set, peer is ECN capable. */ if (tcp_do_ecn && (th->th_flags & (TH_ECE|TH_CWR)) == (TH_ECE|TH_CWR)) sc->sc_flags |= SCF_ECN_PERMIT; #endif /* * Set SCF_SACK_PERMIT if peer did send a SACK_PERMITTED option * (i.e., if tcp_dooptions() did set TF_SACK_PERMIT). */ if (tb.sack_enable && (tb.t_flags & TF_SACK_PERMIT)) sc->sc_flags |= SCF_SACK_PERMIT; #ifdef TCP_SIGNATURE if (tb.t_flags & TF_SIGNATURE) sc->sc_flags |= SCF_SIGNATURE; #endif sc->sc_tp = tp; if (syn_cache_respond(sc, m) == 0) { syn_cache_insert(sc, tp); tcpstat_inc(tcps_sndacks); tcpstat_inc(tcps_sndtotal); } else { syn_cache_put(sc); tcpstat_inc(tcps_sc_dropped); } return (0); } int syn_cache_respond(struct syn_cache *sc, struct mbuf *m) { u_int8_t *optp; int optlen, error; u_int16_t tlen; struct ip *ip = NULL; #ifdef INET6 struct ip6_hdr *ip6 = NULL; #endif struct tcphdr *th; u_int hlen; struct inpcb *inp; switch (sc->sc_src.sa.sa_family) { case AF_INET: hlen = sizeof(struct ip); break; #ifdef INET6 case AF_INET6: hlen = sizeof(struct ip6_hdr); break; #endif default: m_freem(m); return (EAFNOSUPPORT); } /* Compute the size of the TCP options. */ optlen = 4 + (sc->sc_request_r_scale != 15 ? 4 : 0) + ((sc->sc_flags & SCF_SACK_PERMIT) ? 4 : 0) + #ifdef TCP_SIGNATURE ((sc->sc_flags & SCF_SIGNATURE) ? TCPOLEN_SIGLEN : 0) + #endif ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0); tlen = hlen + sizeof(struct tcphdr) + optlen; /* * Create the IP+TCP header from scratch. 
*/ m_freem(m); #ifdef DIAGNOSTIC if (max_linkhdr + tlen > MCLBYTES) return (ENOBUFS); #endif MGETHDR(m, M_DONTWAIT, MT_DATA); if (m && max_linkhdr + tlen > MHLEN) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); m = NULL; } } if (m == NULL) return (ENOBUFS); /* Fixup the mbuf. */ m->m_data += max_linkhdr; m->m_len = m->m_pkthdr.len = tlen; m->m_pkthdr.ph_ifidx = 0; m->m_pkthdr.ph_rtableid = sc->sc_rtableid; memset(mtod(m, u_char *), 0, tlen); switch (sc->sc_src.sa.sa_family) { case AF_INET: ip = mtod(m, struct ip *); ip->ip_dst = sc->sc_src.sin.sin_addr; ip->ip_src = sc->sc_dst.sin.sin_addr; ip->ip_p = IPPROTO_TCP; th = (struct tcphdr *)(ip + 1); th->th_dport = sc->sc_src.sin.sin_port; th->th_sport = sc->sc_dst.sin.sin_port; break; #ifdef INET6 case AF_INET6: ip6 = mtod(m, struct ip6_hdr *); ip6->ip6_dst = sc->sc_src.sin6.sin6_addr; ip6->ip6_src = sc->sc_dst.sin6.sin6_addr; ip6->ip6_nxt = IPPROTO_TCP; /* ip6_plen will be updated in ip6_output() */ th = (struct tcphdr *)(ip6 + 1); th->th_dport = sc->sc_src.sin6.sin6_port; th->th_sport = sc->sc_dst.sin6.sin6_port; break; #endif default: unhandled_af(sc->sc_src.sa.sa_family); } th->th_seq = htonl(sc->sc_iss); th->th_ack = htonl(sc->sc_irs + 1); th->th_off = (sizeof(struct tcphdr) + optlen) >> 2; th->th_flags = TH_SYN|TH_ACK; #ifdef TCP_ECN /* Set ECE for SYN-ACK if peer supports ECN. */ if (tcp_do_ecn && (sc->sc_flags & SCF_ECN_PERMIT)) th->th_flags |= TH_ECE; #endif th->th_win = htons(sc->sc_win); /* th_sum already 0 */ /* th_urp already 0 */ /* Tack on the TCP options. */ optp = (u_int8_t *)(th + 1); *optp++ = TCPOPT_MAXSEG; *optp++ = 4; *optp++ = (sc->sc_ourmaxseg >> 8) & 0xff; *optp++ = sc->sc_ourmaxseg & 0xff; /* Include SACK_PERMIT_HDR option if peer has already done so. 
*/ if (sc->sc_flags & SCF_SACK_PERMIT) { *((u_int32_t *)optp) = htonl(TCPOPT_SACK_PERMIT_HDR); optp += 4; } if (sc->sc_request_r_scale != 15) { *((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 | TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 | sc->sc_request_r_scale); optp += 4; } if (sc->sc_flags & SCF_TIMESTAMP) { u_int32_t *lp = (u_int32_t *)(optp); /* Form timestamp option as shown in appendix A of RFC 1323. */ *lp++ = htonl(TCPOPT_TSTAMP_HDR); *lp++ = htonl(SYN_CACHE_TIMESTAMP(sc)); *lp = htonl(sc->sc_timestamp); optp += TCPOLEN_TSTAMP_APPA; } #ifdef TCP_SIGNATURE if (sc->sc_flags & SCF_SIGNATURE) { union sockaddr_union src, dst; struct tdb *tdb; bzero(&src, sizeof(union sockaddr_union)); bzero(&dst, sizeof(union sockaddr_union)); src.sa.sa_len = sc->sc_src.sa.sa_len; src.sa.sa_family = sc->sc_src.sa.sa_family; dst.sa.sa_len = sc->sc_dst.sa.sa_len; dst.sa.sa_family = sc->sc_dst.sa.sa_family; switch (sc->sc_src.sa.sa_family) { case 0: /*default to PF_INET*/ case AF_INET: src.sin.sin_addr = mtod(m, struct ip *)->ip_src; dst.sin.sin_addr = mtod(m, struct ip *)->ip_dst; break; #ifdef INET6 case AF_INET6: src.sin6.sin6_addr = mtod(m, struct ip6_hdr *)->ip6_src; dst.sin6.sin6_addr = mtod(m, struct ip6_hdr *)->ip6_dst; break; #endif /* INET6 */ } tdb = gettdbbysrcdst(rtable_l2(sc->sc_rtableid), 0, &src, &dst, IPPROTO_TCP); if (tdb == NULL) { m_freem(m); return (EPERM); } /* Send signature option */ *(optp++) = TCPOPT_SIGNATURE; *(optp++) = TCPOLEN_SIGNATURE; if (tcp_signature(tdb, sc->sc_src.sa.sa_family, m, th, hlen, 0, optp) < 0) { m_freem(m); return (EINVAL); } optp += 16; /* Pad options list to the next 32 bit boundary and * terminate it. */ *optp++ = TCPOPT_NOP; *optp++ = TCPOPT_EOL; } #endif /* TCP_SIGNATURE */ /* Compute the packet's checksum. 
*/ switch (sc->sc_src.sa.sa_family) { case AF_INET: ip->ip_len = htons(tlen - hlen); th->th_sum = 0; th->th_sum = in_cksum(m, tlen); break; #ifdef INET6 case AF_INET6: ip6->ip6_plen = htons(tlen - hlen); th->th_sum = 0; th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen); break; #endif } /* use IPsec policy and ttl from listening socket, on SYN ACK */ inp = sc->sc_tp ? sc->sc_tp->t_inpcb : NULL; /* * Fill in some straggling IP bits. Note the stack expects * ip_len to be in host order, for convenience. */ switch (sc->sc_src.sa.sa_family) { case AF_INET: ip->ip_len = htons(tlen); ip->ip_ttl = inp ? inp->inp_ip.ip_ttl : ip_defttl; if (inp != NULL) ip->ip_tos = inp->inp_ip.ip_tos; break; #ifdef INET6 case AF_INET6: ip6->ip6_vfc &= ~IPV6_VERSION_MASK; ip6->ip6_vfc |= IPV6_VERSION; ip6->ip6_plen = htons(tlen - hlen); /* ip6_hlim will be initialized afterwards */ /* leave flowlabel = 0, it is legal and require no state mgmt */ break; #endif } switch (sc->sc_src.sa.sa_family) { case AF_INET: error = ip_output(m, sc->sc_ipopts, &sc->sc_route4, (ip_mtudisc ? IP_MTUDISC : 0), NULL, inp, 0); break; #ifdef INET6 case AF_INET6: ip6->ip6_hlim = in6_selecthlim(inp); error = ip6_output(m, NULL /*XXX*/, &sc->sc_route6, 0, NULL, NULL); break; #endif default: error = EAFNOSUPPORT; break; } return (error); }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_1420_1
crossvul-cpp_data_bad_5420_1
/* $Id: parsetagx.c,v 1.18 2006/06/07 03:52:03 inu Exp $ */ #include "fm.h" #include "myctype.h" #include "indep.h" #include "Str.h" #include "parsetagx.h" #include "hash.h" #include "html.c" /* parse HTML tag */ static int noConv(char *, char **); static int toNumber(char *, int *); static int toLength(char *, int *); static int toAlign(char *, int *); static int toVAlign(char *, int *); /* *INDENT-OFF* */ static int (*toValFunc[]) () = { noConv, /* VTYPE_NONE */ noConv, /* VTYPE_STR */ toNumber, /* VTYPE_NUMBER */ toLength, /* VTYPE_LENGTH */ toAlign, /* VTYPE_ALIGN */ toVAlign, /* VTYPE_VALIGN */ noConv, /* VTYPE_ACTION */ noConv, /* VTYPE_ENCTYPE */ noConv, /* VTYPE_METHOD */ noConv, /* VTYPE_MLENGTH */ noConv, /* VTYPE_TYPE */ }; /* *INDENT-ON* */ static int noConv(char *oval, char **str) { *str = oval; return 1; } static int toNumber(char *oval, int *num) { char *ep; int x; x = strtol(oval, &ep, 10); if (ep > oval) { *num = x; return 1; } else return 0; } static int toLength(char *oval, int *len) { int w; if (!IS_DIGIT(oval[0])) return 0; w = atoi(oval); if (w < 0) return 0; if (w == 0) w = 1; if (oval[strlen(oval) - 1] == '%') *len = -w; else *len = w; return 1; } static int toAlign(char *oval, int *align) { if (strcasecmp(oval, "left") == 0) *align = ALIGN_LEFT; else if (strcasecmp(oval, "right") == 0) *align = ALIGN_RIGHT; else if (strcasecmp(oval, "center") == 0) *align = ALIGN_CENTER; else if (strcasecmp(oval, "top") == 0) *align = ALIGN_TOP; else if (strcasecmp(oval, "bottom") == 0) *align = ALIGN_BOTTOM; else if (strcasecmp(oval, "middle") == 0) *align = ALIGN_MIDDLE; else return 0; return 1; } static int toVAlign(char *oval, int *valign) { if (strcasecmp(oval, "top") == 0 || strcasecmp(oval, "baseline") == 0) *valign = VALIGN_TOP; else if (strcasecmp(oval, "bottom") == 0) *valign = VALIGN_BOTTOM; else if (strcasecmp(oval, "middle") == 0) *valign = VALIGN_MIDDLE; else return 0; return 1; } extern Hash_si tagtable; #define MAX_TAG_LEN 64 struct 
parsed_tag * parse_tag(char **s, int internal) { struct parsed_tag *tag = NULL; int tag_id; char tagname[MAX_TAG_LEN], attrname[MAX_TAG_LEN]; char *p, *q; int i, attr_id = 0, nattr; /* Parse tag name */ q = (*s) + 1; p = tagname; if (*q == '/') { *(p++) = *(q++); SKIP_BLANKS(q); } while (*q && !IS_SPACE(*q) && !(tagname[0] != '/' && *q == '/') && *q != '>' && p - tagname < MAX_TAG_LEN - 1) { *(p++) = TOLOWER(*q); q++; } *p = '\0'; while (*q && !IS_SPACE(*q) && !(tagname[0] != '/' && *q == '/') && *q != '>') q++; tag_id = getHash_si(&tagtable, tagname, HTML_UNKNOWN); if (tag_id == HTML_UNKNOWN || (!internal && TagMAP[tag_id].flag & TFLG_INT)) goto skip_parse_tagarg; tag = New(struct parsed_tag); bzero(tag, sizeof(struct parsed_tag)); tag->tagid = tag_id; if ((nattr = TagMAP[tag_id].max_attribute) > 0) { tag->attrid = NewAtom_N(unsigned char, nattr); tag->value = New_N(char *, nattr); tag->map = NewAtom_N(unsigned char, MAX_TAGATTR); memset(tag->map, MAX_TAGATTR, MAX_TAGATTR); memset(tag->attrid, ATTR_UNKNOWN, nattr); for (i = 0; i < nattr; i++) tag->map[TagMAP[tag_id].accept_attribute[i]] = i; } /* Parse tag arguments */ SKIP_BLANKS(q); while (1) { Str value = NULL, value_tmp = NULL; if (*q == '>' || *q == '\0') goto done_parse_tag; p = attrname; while (*q && *q != '=' && !IS_SPACE(*q) && *q != '>' && p - attrname < MAX_TAG_LEN - 1) { *(p++) = TOLOWER(*q); q++; } *p = '\0'; while (*q && *q != '=' && !IS_SPACE(*q) && *q != '>') q++; SKIP_BLANKS(q); if (*q == '=') { /* get value */ value_tmp = Strnew(); q++; SKIP_BLANKS(q); if (*q == '"') { q++; while (*q && *q != '"') { Strcat_char(value_tmp, *q); if (!tag->need_reconstruct && is_html_quote(*q)) tag->need_reconstruct = TRUE; q++; } if (*q == '"') q++; } else if (*q == '\'') { q++; while (*q && *q != '\'') { Strcat_char(value_tmp, *q); if (!tag->need_reconstruct && is_html_quote(*q)) tag->need_reconstruct = TRUE; q++; } if (*q == '\'') q++; } else if (*q) { while (*q && !IS_SPACE(*q) && *q != '>') { 
Strcat_char(value_tmp, *q); if (!tag->need_reconstruct && is_html_quote(*q)) tag->need_reconstruct = TRUE; q++; } } } for (i = 0; i < nattr; i++) { if ((tag)->attrid[i] == ATTR_UNKNOWN && strcmp(AttrMAP[TagMAP[tag_id].accept_attribute[i]].name, attrname) == 0) { attr_id = TagMAP[tag_id].accept_attribute[i]; break; } } if (value_tmp) { int j, hidden=FALSE; for (j=0; j<i; j++) { if (tag->attrid[j] == ATTR_TYPE && tag->value[j] && strcmp("hidden",tag->value[j]) == 0) { hidden=TRUE; break; } } if ((tag_id == HTML_INPUT || tag_id == HTML_INPUT_ALT) && attr_id == ATTR_VALUE && hidden) { value = value_tmp; } else { char *x; value = Strnew(); for (x = value_tmp->ptr; *x; x++) { if (*x != '\n') Strcat_char(value, *x); } } } if (i != nattr) { if (!internal && ((AttrMAP[attr_id].flag & AFLG_INT) || (value && AttrMAP[attr_id].vtype == VTYPE_METHOD && !strcasecmp(value->ptr, "internal")))) { tag->need_reconstruct = TRUE; continue; } tag->attrid[i] = attr_id; if (value) tag->value[i] = html_unquote(value->ptr); else tag->value[i] = NULL; } else { tag->need_reconstruct = TRUE; } } skip_parse_tagarg: while (*q != '>' && *q) q++; done_parse_tag: if (*q == '>') q++; *s = q; return tag; } int parsedtag_set_value(struct parsed_tag *tag, int id, char *value) { int i; if (!parsedtag_accepts(tag, id)) return 0; i = tag->map[id]; tag->attrid[i] = id; if (value) tag->value[i] = allocStr(value, -1); else tag->value[i] = NULL; tag->need_reconstruct = TRUE; return 1; } int parsedtag_get_value(struct parsed_tag *tag, int id, void *value) { int i; if (!parsedtag_exists(tag, id) || !tag->value[i = tag->map[id]]) return 0; return toValFunc[AttrMAP[id].vtype] (tag->value[i], value); } Str parsedtag2str(struct parsed_tag *tag) { int i; int tag_id = tag->tagid; int nattr = TagMAP[tag_id].max_attribute; Str tagstr = Strnew(); Strcat_char(tagstr, '<'); Strcat_charp(tagstr, TagMAP[tag_id].name); for (i = 0; i < nattr; i++) { if (tag->attrid[i] != ATTR_UNKNOWN) { Strcat_char(tagstr, ' '); 
Strcat_charp(tagstr, AttrMAP[tag->attrid[i]].name); if (tag->value[i]) Strcat(tagstr, Sprintf("=\"%s\"", html_quote(tag->value[i]))); } } Strcat_char(tagstr, '>'); return tagstr; }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5420_1
crossvul-cpp_data_good_4899_0
/* * Code to handle user-settable options. This is all pretty much table- * driven. Checklist for adding a new option: * - Put it in the options array below (copy an existing entry). * - For a global option: Add a variable for it in option_defs.h. * - For a buffer or window local option: * - Add a PV_XX entry to the enum below. * - Add a variable to the window or buffer struct in buffer_defs.h. * - For a window option, add some code to copy_winopt(). * - For a buffer option, add some code to buf_copy_options(). * - For a buffer string option, add code to check_buf_options(). * - If it's a numeric option, add any necessary bounds checks to do_set(). * - If it's a list of flags, add some code in do_set(), search for WW_ALL. * - When adding an option with expansion (P_EXPAND), but with a different * default for Vi and Vim (no P_VI_DEF), add some code at VIMEXP. * - Add documentation! One line in doc/help.txt, full description in * options.txt, and any other related places. * - Add an entry in runtime/optwin.vim. * When making changes: * - Adjust the help for the option in doc/option.txt. 
*/ #define IN_OPTION_C #include <assert.h> #include <inttypes.h> #include <stdbool.h> #include <stdint.h> #include <string.h> #include <stdint.h> #include <stdlib.h> #include <limits.h> #include "nvim/vim.h" #include "nvim/ascii.h" #include "nvim/edit.h" #include "nvim/option.h" #include "nvim/buffer.h" #include "nvim/charset.h" #include "nvim/cursor.h" #include "nvim/diff.h" #include "nvim/digraph.h" #include "nvim/eval.h" #include "nvim/ex_cmds2.h" #include "nvim/ex_docmd.h" #include "nvim/ex_getln.h" #include "nvim/fileio.h" #include "nvim/fold.h" #include "nvim/getchar.h" #include "nvim/hardcopy.h" #include "nvim/indent_c.h" #include "nvim/mbyte.h" #include "nvim/memfile.h" #include "nvim/memline.h" #include "nvim/memory.h" #include "nvim/message.h" #include "nvim/misc1.h" #include "nvim/keymap.h" #include "nvim/garray.h" #include "nvim/cursor_shape.h" #include "nvim/move.h" #include "nvim/mouse.h" #include "nvim/normal.h" #include "nvim/os_unix.h" #include "nvim/path.h" #include "nvim/regexp.h" #include "nvim/screen.h" #include "nvim/spell.h" #include "nvim/strings.h" #include "nvim/syntax.h" #include "nvim/ui.h" #include "nvim/undo.h" #include "nvim/window.h" #include "nvim/os/os.h" #include "nvim/os/input.h" /* * The options that are local to a window or buffer have "indir" set to one of * these values. Special values: * PV_NONE: global option. * PV_WIN is added: window-local option * PV_BUF is added: buffer-local option * PV_BOTH is added: global option which also has a local value. 
*/ #define PV_BOTH 0x1000 #define PV_WIN 0x2000 #define PV_BUF 0x4000 #define PV_MASK 0x0fff #define OPT_WIN(x) (idopt_T)(PV_WIN + (int)(x)) #define OPT_BUF(x) (idopt_T)(PV_BUF + (int)(x)) #define OPT_BOTH(x) (idopt_T)(PV_BOTH + (int)(x)) /* WV_ and BV_ values get typecasted to this for the "indir" field */ typedef enum { PV_NONE = 0, PV_MAXVAL = 0xffff /* to avoid warnings for value out of range */ } idopt_T; /* * Options local to a window have a value local to a buffer and global to all * buffers. Indicate this by setting "var" to VAR_WIN. */ #define VAR_WIN ((char_u *)-1) /* * These are the global values for options which are also local to a buffer. * Only to be used in option.c! */ static int p_ai; static int p_bin; static int p_bomb; static char_u *p_bh; static char_u *p_bt; static int p_bl; static int p_ci; static int p_cin; static char_u *p_cink; static char_u *p_cino; static char_u *p_cinw; static char_u *p_com; static char_u *p_cms; static char_u *p_cpt; static char_u *p_cfu; static char_u *p_ofu; static int p_eol; static int p_fixeol; static int p_et; static char_u *p_fenc; static char_u *p_ff; static char_u *p_fo; static char_u *p_flp; static char_u *p_ft; static long p_iminsert; static long p_imsearch; static char_u *p_inex; static char_u *p_inde; static char_u *p_indk; static char_u *p_fex; static int p_inf; static char_u *p_isk; static int p_lisp; static int p_ml; static int p_ma; static int p_mod; static char_u *p_mps; static char_u *p_nf; static int p_pi; static char_u *p_qe; static int p_ro; static int p_si; static long p_sts; static char_u *p_sua; static long p_sw; static int p_swf; static long p_smc; static char_u *p_syn; static char_u *p_spc; static char_u *p_spf; static char_u *p_spl; static long p_ts; static long p_tw; static int p_udf; static long p_wm; static char_u *p_keymap; /* Saved values for when 'bin' is set. 
*/ static int p_et_nobin; static int p_ml_nobin; static long p_tw_nobin; static long p_wm_nobin; // Saved values for when 'paste' is set. static int p_ai_nopaste; static int p_et_nopaste; static long p_sts_nopaste; static long p_tw_nopaste; static long p_wm_nopaste; typedef struct vimoption { char *fullname; /* full option name */ char *shortname; /* permissible abbreviation */ uint32_t flags; /* see below */ char_u *var; /* global option: pointer to variable; * window-local option: VAR_WIN; * buffer-local option: global value */ idopt_T indir; /* global option: PV_NONE; * local option: indirect option index */ char_u *def_val[2]; /* default values for variable (vi and vim) */ scid_T scriptID; /* script in which the option was last set */ # define SCRIPTID_INIT , 0 } vimoption_T; #define VI_DEFAULT 0 /* def_val[VI_DEFAULT] is Vi default value */ #define VIM_DEFAULT 1 /* def_val[VIM_DEFAULT] is Vim default value */ /* * Flags */ #define P_BOOL 0x01U /* the option is boolean */ #define P_NUM 0x02U /* the option is numeric */ #define P_STRING 0x04U /* the option is a string */ #define P_ALLOCED 0x08U /* the string option is in allocated memory, must use free_string_option() when assigning new value. Not set if default is the same. */ #define P_EXPAND 0x10U /* environment expansion. 
NOTE: P_EXPAND can never be used for local or hidden options */ #define P_NODEFAULT 0x40U /* don't set to default value */ #define P_DEF_ALLOCED 0x80U /* default value is in allocated memory, must use free() when assigning new value */ #define P_WAS_SET 0x100U /* option has been set/reset */ #define P_NO_MKRC 0x200U /* don't include in :mkvimrc output */ #define P_VI_DEF 0x400U /* Use Vi default for Vim */ #define P_VIM 0x800U /* Vim option */ /* when option changed, what to display: */ #define P_RSTAT 0x1000U /* redraw status lines */ #define P_RWIN 0x2000U /* redraw current window */ #define P_RBUF 0x4000U /* redraw current buffer */ #define P_RALL 0x6000U /* redraw all windows */ #define P_RCLR 0x7000U /* clear and redraw all */ #define P_COMMA 0x8000U ///< comma separated list #define P_ONECOMMA 0x18000U ///< P_COMMA and cannot have two consecutive ///< commas #define P_NODUP 0x20000U ///< don't allow duplicate strings #define P_FLAGLIST 0x40000U ///< list of single-char flags #define P_SECURE 0x80000U ///< cannot change in modeline or secure mode #define P_GETTEXT 0x100000U ///< expand default value with _() #define P_NOGLOB 0x200000U ///< do not use local value for global vimrc #define P_NFNAME 0x400000U ///< only normal file name chars allowed #define P_INSECURE 0x800000U ///< option was set from a modeline #define P_PRI_MKRC 0x1000000U ///< priority for :mkvimrc (setting option ///< has side effects) #define P_NO_ML 0x2000000U ///< not allowed in modeline #define P_CURSWANT 0x4000000U ///< update curswant required; not needed ///< when there is a redraw flag #define P_NO_DEF_EXP 0x8000000U ///< Do not expand default value. 
/// Default value for the 'highlight' option: maps each single-character key
/// to the highlight group name used for that UI element.
#define HIGHLIGHT_INIT \
  "8:SpecialKey,~:EndOfBuffer,z:TermCursor,Z:TermCursorNC,@:NonText," \
  "d:Directory,e:ErrorMsg,i:IncSearch,l:Search,m:MoreMsg,M:ModeMsg,n:LineNr," \
  "N:CursorLineNr,r:Question,s:StatusLine,S:StatusLineNC,c:VertSplit,t:Title," \
  "v:Visual,w:WarningMsg,W:WildMenu,f:Folded,F:FoldColumn," \
  "A:DiffAdd,C:DiffChange,D:DiffDelete,T:DiffText,>:SignColumn,-:Conceal," \
  "B:SpellBad,P:SpellCap,R:SpellRare,L:SpellLocal,+:Pmenu,=:PmenuSel," \
  "x:PmenuSbar,X:PmenuThumb,*:TabLine,#:TabLineSel,_:TabLineFill," \
  "!:CursorColumn,.:CursorLine,o:ColorColumn,q:QuickFixLine"

/*
 * options[] is initialized here.
 * The order of the options MUST be alphabetic for ":set all" and findoption().
 * All option names MUST start with a lowercase letter (for findoption()).
 * Exception: "t_" options are at the end.
 * The options with a NULL variable are 'hidden': a set command for them is
 * ignored and they are not printed.
 */
#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "options.generated.h"
#endif

#define PARAM_COUNT ARRAY_SIZE(options)

// Tables of the words accepted by string options that take a fixed set of
// values; each table is NULL-terminated.
static char *(p_ambw_values[]) = { "single", "double", NULL };
static char *(p_bg_values[]) = { "light", "dark", NULL };
static char *(p_nf_values[]) = { "bin", "octal", "hex", "alpha", NULL };
static char *(p_ff_values[]) = { FF_UNIX, FF_DOS, FF_MAC, NULL };
static char *(p_wop_values[]) = { "tagfile", NULL };
static char *(p_wak_values[]) = { "yes", "menu", "no", NULL };
static char *(p_mousem_values[]) = { "extend", "popup", "popup_setpos",
                                     "mac", NULL };
static char *(p_sel_values[]) = { "inclusive", "exclusive", "old", NULL };
static char *(p_slm_values[]) = { "mouse", "key", "cmd", NULL };
static char *(p_km_values[]) = { "startsel", "stopsel", NULL };
static char *(p_scbopt_values[]) = { "ver", "hor", "jump", NULL };
static char *(p_debug_values[]) = { "msg", "throw", "beep", NULL };
static char *(p_ead_values[]) = { "both", "ver", "hor", NULL };
static char *(p_buftype_values[]) = { "nofile", "nowrite", "quickfix", "help",
                                      "acwrite", "terminal", NULL };
static char *(p_bufhidden_values[]) = { "hide", "unload", "delete",
                                        "wipe", NULL };
static char *(p_bs_values[]) = { "indent", "eol", "start", NULL };
static char *(p_fdm_values[]) = { "manual", "expr", "marker", "indent",
                                  "syntax", "diff", NULL };
static char *(p_fcl_values[]) = { "all", NULL };
static char *(p_cot_values[]) = { "menu", "menuone", "longest", "preview",
                                  "noinsert", "noselect", NULL };
static char *(p_icm_values[]) = { "nosplit", "split", NULL };

#ifdef INCLUDE_GENERATED_DECLARATIONS
# include "option.c.generated.h"
#endif

/// Append "src" to "dest", escaping every comma with a backslash.
///
/// @param[in,out]  dest  Destination buffer.  Caller must have reserved room
///                       for up to 2 * len bytes (worst case: all commas).
/// @param[in]  src  Source bytes (need not be NUL-terminated).
/// @param[in]  len  Number of bytes of src to copy.
///
/// @return Pointer to one past the last byte written into dest.
static char *strcpy_comma_escaped(char *dest, const char *src, const size_t len)
  FUNC_ATTR_NONNULL_ALL FUNC_ATTR_WARN_UNUSED_RESULT
{
  size_t shift = 0;
  for (size_t i = 0; i < len; i++) {
    if (src[i] == ',') {
      // "shift" counts escapes emitted so far, i.e. how far the output is
      // displaced relative to the input index.
      dest[i + shift++] = '\\';
    }
    dest[i + shift] = src[i];
  }
  return &dest[len + shift];
}

/// Compute length of a colon-separated value, doubled and with some suffixes
///
/// @param[in]  val  Colon-separated array value.
/// @param[in]  common_suf_len  Length of the common suffix which is appended to
///                             each item in the array, twice.
/// @param[in]  single_suf_len  Length of the suffix which is appended to each
///                             item in the array once.
///
/// @return Length of the comma-separated string array that contains each item
///         in the original array twice with suffixes with given length
///         (common_suf is present after each new item, single_suf is present
///         after half of the new items) and with commas after each item, commas
///         inside the values are escaped.
static inline size_t compute_double_colon_len(const char *const val,
                                              const size_t common_suf_len,
                                              const size_t single_suf_len)
  FUNC_ATTR_WARN_UNUSED_RESULT FUNC_ATTR_PURE
{
  if (val == NULL || *val == NUL) {
    return 0;
  }
  size_t ret = 0;
  const void *iter = NULL;
  do {
    size_t dir_len;
    const char *dir;
    iter = vim_colon_env_iter(val, iter, &dir, &dir_len);
    if (dir != NULL && dir_len > 0) {
      // Per directory, account (twice) for: the raw bytes, a backslash per
      // comma that will be escaped, the common suffix, and a path separator
      // when the entry does not already end in one.  The single suffix and
      // the trailing comma are counted once each per copy pair.
      ret += ((dir_len + memcnt(dir, ',', dir_len)
               + common_suf_len
               + !after_pathsep(dir, dir + dir_len)) * 2
              + single_suf_len);
    }
  } while (iter != NULL);
  return ret;
}

// Length of the literal path component "nvim" (without its NUL).
#define NVIM_SIZE (sizeof("nvim") - 1)

/// Add directories to a comma-separated array from a colon-separated one
///
/// Commas are escaped in process. To each item PATHSEP "nvim" is appended in
/// addition to suf1 and suf2.
///
/// @param[in,out]  dest  Destination comma-separated array.
/// @param[in]  val  Source colon-separated array.
/// @param[in]  suf1  If not NULL, suffix appended to destination. Prior to it
///                   directory separator is appended. Suffix must not contain
///                   commas.
/// @param[in]  len1  Length of the suf1.
/// @param[in]  suf2  If not NULL, another suffix appended to destination. Again
///                   with directory separator behind. Suffix must not contain
///                   commas.
/// @param[in]  len2  Length of the suf2.
/// @param[in]  forward  If true, iterate over val in forward direction.
///                      Otherwise in reverse.
///
/// @return (dest + appended_characters_length)
static inline char *add_colon_dirs(char *dest, const char *const val,
                                   const char *const suf1, const size_t len1,
                                   const char *const suf2, const size_t len2,
                                   const bool forward)
  FUNC_ATTR_WARN_UNUSED_RESULT FUNC_ATTR_NONNULL_RET FUNC_ATTR_NONNULL_ARG(1)
{
  if (val == NULL || *val == NUL) {
    return dest;
  }
  const void *iter = NULL;
  do {
    size_t dir_len;
    const char *dir;
    // Pick the iteration direction; both iterators share the same calling
    // convention (pass the previous iterator state, get dir/dir_len out).
    iter = (forward ? vim_colon_env_iter : vim_colon_env_iter_rev)(
        val, iter, &dir, &dir_len);
    if (dir != NULL && dir_len > 0) {
      dest = strcpy_comma_escaped(dest, dir, dir_len);
      if (!after_pathsep(dest - 1, dest)) {
        *dest++ = PATHSEP;
      }
      memmove(dest, "nvim", NVIM_SIZE);
      dest += NVIM_SIZE;
      if (suf1 != NULL) {
        *dest++ = PATHSEP;
        memmove(dest, suf1, len1);
        dest += len1;
        if (suf2 != NULL) {
          *dest++ = PATHSEP;
          memmove(dest, suf2, len2);
          dest += len2;
        }
      }
      *dest++ = ',';
    }
  } while (iter != NULL);
  return dest;
}

/// Add directory to a comma-separated list of directories
///
/// In the added directory comma is escaped.
///
/// @param[in,out]  dest  Destination comma-separated array.
/// @param[in]  dir  Directory to append.
/// @param[in]  dir_len  Length of the dir.
/// @param[in]  append_nvim  If true, append "nvim" as the very first suffix.
/// @param[in]  suf1  If not NULL, suffix appended to destination. Prior to it
///                   directory separator is appended. Suffix must not contain
///                   commas.
/// @param[in]  len1  Length of the suf1.
/// @param[in]  suf2  If not NULL, another suffix appended to destination. Again
///                   with directory separator behind. Suffix must not contain
///                   commas.
/// @param[in]  len2  Length of the suf2.
///
/// @return (dest + appended_characters_length)
static inline char *add_dir(char *dest, const char *const dir,
                            const size_t dir_len, const bool append_nvim,
                            const char *const suf1, const size_t len1,
                            const char *const suf2, const size_t len2)
  FUNC_ATTR_NONNULL_RET FUNC_ATTR_NONNULL_ARG(1) FUNC_ATTR_WARN_UNUSED_RESULT
{
  if (dir == NULL || dir_len == 0) {
    return dest;
  }
  dest = strcpy_comma_escaped(dest, dir, dir_len);
  if (append_nvim) {
    if (!after_pathsep(dest - 1, dest)) {
      *dest++ = PATHSEP;
    }
    memmove(dest, "nvim", NVIM_SIZE);
    dest += NVIM_SIZE;
    if (suf1 != NULL) {
      *dest++ = PATHSEP;
      memmove(dest, suf1, len1);
      dest += len1;
      if (suf2 != NULL) {
        *dest++ = PATHSEP;
        memmove(dest, suf2, len2);
        dest += len2;
      }
    }
  }
  *dest++ = ',';
  return dest;
}

/// Set &runtimepath to its default value.
///
/// The value is built from the XDG base directories (config and data homes
/// plus their system-wide "dirs" lists) and $VIMRUNTIME.  The exact size is
/// precomputed so a single allocation suffices; an assert below verifies the
/// size bookkeeping matches what was actually written.
static void set_runtimepath_default(void)
{
  size_t rtp_size = 0;
  char *const data_home = stdpaths_get_xdg_var(kXDGDataHome);
  char *const config_home = stdpaths_get_xdg_var(kXDGConfigHome);
  char *const vimruntime = vim_getenv("VIMRUNTIME");
  char *const data_dirs = stdpaths_get_xdg_var(kXDGDataDirs);
  char *const config_dirs = stdpaths_get_xdg_var(kXDGConfigDirs);
#define SITE_SIZE (sizeof("site") - 1)
#define AFTER_SIZE (sizeof("after") - 1)
  size_t data_len = 0;
  size_t config_len = 0;
  size_t vimruntime_len = 0;
  if (data_home != NULL) {
    data_len = strlen(data_home);
    if (data_len != 0) {
      // data_home appears twice ("site" and "site/after" variants).
      rtp_size += ((data_len + memcnt(data_home, ',', data_len)
                    + NVIM_SIZE + 1 + SITE_SIZE + 1
                    + !after_pathsep(data_home, data_home + data_len)) * 2
                   + AFTER_SIZE + 1);
    }
  }
  if (config_home != NULL) {
    config_len = strlen(config_home);
    if (config_len != 0) {
      // config_home also appears twice (plain and "after" variants).
      rtp_size += ((config_len + memcnt(config_home, ',', config_len)
                    + NVIM_SIZE + 1
                    + !after_pathsep(config_home, config_home + config_len)) * 2
                   + AFTER_SIZE + 1);
    }
  }
  if (vimruntime != NULL) {
    vimruntime_len = strlen(vimruntime);
    if (vimruntime_len != 0) {
      rtp_size += vimruntime_len + memcnt(vimruntime, ',', vimruntime_len) + 1;
    }
  }
  rtp_size += compute_double_colon_len(data_dirs,
                                       NVIM_SIZE + 1 + SITE_SIZE + 1,
                                       AFTER_SIZE + 1);
  rtp_size += compute_double_colon_len(config_dirs, NVIM_SIZE + 1,
                                       AFTER_SIZE + 1);
  if (rtp_size == 0) {
    return;
  }
  char *const rtp = xmalloc(rtp_size);
  char *rtp_cur = rtp;
  // Order matters: user config first, then system config, user data ("site"),
  // system data, $VIMRUNTIME, then the "after" variants in reverse priority.
  rtp_cur = add_dir(rtp_cur, config_home, config_len, true, NULL, 0, NULL, 0);
  rtp_cur = add_colon_dirs(rtp_cur, config_dirs, NULL, 0, NULL, 0, true);
  rtp_cur = add_dir(rtp_cur, data_home, data_len, true, "site", SITE_SIZE,
                    NULL, 0);
  rtp_cur = add_colon_dirs(rtp_cur, data_dirs, "site", SITE_SIZE, NULL, 0,
                           true);
  rtp_cur = add_dir(rtp_cur, vimruntime, vimruntime_len, false, NULL, 0,
                    NULL, 0);
  rtp_cur = add_colon_dirs(rtp_cur, data_dirs, "site", SITE_SIZE, "after",
                           AFTER_SIZE, false);
  rtp_cur = add_dir(rtp_cur, data_home, data_len, true, "site", SITE_SIZE,
                    "after", AFTER_SIZE);
  rtp_cur = add_colon_dirs(rtp_cur, config_dirs, "after", AFTER_SIZE, NULL, 0,
                           false);
  rtp_cur = add_dir(rtp_cur, config_home, config_len, true, "after",
                    AFTER_SIZE, NULL, 0);
  // Strip trailing comma.
  rtp_cur[-1] = NUL;
  assert((size_t)(rtp_cur - rtp) == rtp_size);
#undef SITE_SIZE
#undef AFTER_SIZE
  set_string_default("runtimepath", rtp, true);
  // Make a copy of 'rtp' for 'packpath'
  set_string_default("packpath", rtp, false);
  xfree(data_dirs);
  xfree(config_dirs);
  xfree(data_home);
  xfree(config_home);
  xfree(vimruntime);
}

#undef NVIM_SIZE

/*
 * Initialize the options, first part.
 *
 * Called only once from main(), just after creating the first buffer.
 */
void set_init_1(void)
{
  int opt_idx;

  langmap_init();

  /* Be nocompatible */
  p_cp = FALSE;

  /*
   * Find default value for 'shell' option.
   * Don't use it if it is empty.
   */
  {
    const char *shell = os_getenv("SHELL");
    if (shell != NULL) {
      set_string_default("sh", (char *)shell, false);
    }
  }

  /*
   * Set the default for 'backupskip' to include environment variables for
   * temp files.
   */
  {
# ifdef UNIX
    static char *(names[4]) = {"", "TMPDIR", "TEMP", "TMP"};
# else
    static char *(names[3]) = {"TMPDIR", "TEMP", "TMP"};
# endif
    int len;
    garray_T ga;

    ga_init(&ga, 1, 100);
    for (size_t n = 0; n < ARRAY_SIZE(names); ++n) {
      bool mustfree = true;
      char *p;
# ifdef UNIX
      // The empty name stands for a hard-coded "/tmp" entry on UNIX; the
      // "else" below deliberately pairs with this "if" across the #endif.
      if (*names[n] == NUL) {
        p = "/tmp";
        mustfree = false;
      }
      else
# endif
      p = vim_getenv(names[n]);
      if (p != NULL && *p != NUL) {
        // First time count the NUL, otherwise count the ','.
        len = (int)strlen(p) + 3;
        ga_grow(&ga, len);
        if (!GA_EMPTY(&ga))
          STRCAT(ga.ga_data, ",");
        STRCAT(ga.ga_data, p);
        add_pathsep(ga.ga_data);
        STRCAT(ga.ga_data, "*");
        ga.ga_len += len;
      }
      if (mustfree) {
        xfree(p);
      }
    }
    if (ga.ga_data != NULL) {
      set_string_default("bsk", ga.ga_data, true);
    }
  }

  /*
   * 'maxmemtot' and 'maxmem' may have to be adjusted for available memory
   */
  opt_idx = findoption((char_u *)"maxmemtot");
  if (opt_idx >= 0) {
    {
      /* Use half of amount of memory available to Vim. */
      /* If too much to fit in uintptr_t, get uintptr_t max */
      uint64_t available_kib = os_get_total_mem_kib();
      uintptr_t n = available_kib / 2 > UINTPTR_MAX
                    ? UINTPTR_MAX
                    : (uintptr_t)(available_kib / 2);
      options[opt_idx].def_val[VI_DEFAULT] = (char_u *)n;
      opt_idx = findoption((char_u *)"maxmem");
      if (opt_idx >= 0) {
        options[opt_idx].def_val[VI_DEFAULT] = (char_u *)n;
      }
    }
  }

  {
    char_u *cdpath;
    char_u *buf;
    int i;
    int j;

    /* Initialize the 'cdpath' option's default value. */
    cdpath = (char_u *)vim_getenv("CDPATH");
    if (cdpath != NULL) {
      // Worst case each char needs a backslash escape, plus leading ','
      // and trailing NUL.
      buf = xmalloc(2 * STRLEN(cdpath) + 2);
      {
        buf[0] = ',';               /* start with ",", current dir first */
        j = 1;
        for (i = 0; cdpath[i] != NUL; ++i) {
          if (vim_ispathlistsep(cdpath[i]))
            buf[j++] = ',';
          else {
            if (cdpath[i] == ' ' || cdpath[i] == ',')
              buf[j++] = '\\';
            buf[j++] = cdpath[i];
          }
        }
        buf[j] = NUL;
        opt_idx = findoption((char_u *)"cdpath");
        if (opt_idx >= 0) {
          options[opt_idx].def_val[VI_DEFAULT] = buf;
          options[opt_idx].flags |= P_DEF_ALLOCED;
        } else
          xfree(buf);           /* cannot happen */
      }
      xfree(cdpath);
    }
  }

#if defined(MSWIN) || defined(MAC)
  /* Set print encoding on platforms that don't default to latin1 */
  set_string_default("printencoding", "hp-roman8", false);
#endif

  // 'printexpr' must be allocated to be able to evaluate it.
  set_string_default("printexpr",
#ifdef UNIX
                     "system(['lpr'] "
                     "+ (empty(&printdevice)?[]:['-P', &printdevice]) "
                     "+ [v:fname_in])"
                     ". delete(v:fname_in)"
                     "+ v:shell_error",
#elif defined(MSWIN)
                     "system(['copy', v:fname_in, "
                     "empty(&printdevice)?'LPT1':&printdevice])"
                     ". delete(v:fname_in)",
#else
                     "",
#endif
                     false);

  // Prefix the user backup directory with ".," so the current directory is
  // tried first (matches the historical default behaviour).
  char *backupdir = stdpaths_user_data_subpath("backup", 0, true);
  const size_t backupdir_len = strlen(backupdir);
  backupdir = xrealloc(backupdir, backupdir_len + 3);
  memmove(backupdir + 2, backupdir, backupdir_len + 1);
  memmove(backupdir, ".,", 2);
  set_string_default("viewdir", stdpaths_user_data_subpath("view", 0, true),
                     true);
  set_string_default("backupdir", backupdir, true);
  set_string_default("directory", stdpaths_user_data_subpath("swap", 2, true),
                     true);
  set_string_default("undodir", stdpaths_user_data_subpath("undo", 0, true),
                     true);
  // Set default for &runtimepath. All necessary expansions are performed in
  // this function.
  set_runtimepath_default();

  /*
   * Set all the options (except the terminal options) to their default
   * value.  Also set the global value for local options.
   */
  set_options_default(0);

  curbuf->b_p_initialized = true;
  curbuf->b_p_ar = -1;          /* no local 'autoread' value */
  curbuf->b_p_ul = NO_LOCAL_UNDOLEVEL;
  check_buf_options(curbuf);
  check_win_options(curwin);
  check_options();

  /* Set all options to their Vim default */
  set_options_default(OPT_FREE);

  // set 'laststatus'
  last_status(false);

  /* Must be before option_expand(), because that one needs vim_isIDc() */
  didset_options();

  // Use the current chartab for the generic chartab. This is not in
  // didset_options() because it only depends on 'encoding'.
  init_spell_chartab();

  /*
   * Expand environment variables and things like "~" for the defaults.
   * If option_expand() returns non-NULL the variable is expanded.  This can
   * only happen for non-indirect options.
   * Also set the default to the expanded value, so ":set" does not list
   * them.
   * Don't set the P_ALLOCED flag, because we don't want to free the
   * default.
   */
  for (opt_idx = 0; options[opt_idx].fullname; opt_idx++) {
    if (options[opt_idx].flags & P_NO_DEF_EXP) {
      continue;
    }
    char *p;
    if ((options[opt_idx].flags & P_GETTEXT)
        && options[opt_idx].var != NULL) {
      p = _(*(char **)options[opt_idx].var);
    } else {
      p = (char *)option_expand(opt_idx, NULL);
    }
    if (p != NULL) {
      p = xstrdup(p);
      *(char **)options[opt_idx].var = p;
      /* VIMEXP
       * Defaults for all expanded options are currently the same for Vi
       * and Vim.  When this changes, add some code here!  Also need to
       * split P_DEF_ALLOCED in two.
       */
      if (options[opt_idx].flags & P_DEF_ALLOCED)
        xfree(options[opt_idx].def_val[VI_DEFAULT]);
      options[opt_idx].def_val[VI_DEFAULT] = (char_u *)p;
      options[opt_idx].flags |= P_DEF_ALLOCED;
    }
  }

  save_file_ff(curbuf);         /* Buffer is unchanged */

  /* Detect use of mlterm.
   * Mlterm is a terminal emulator akin to xterm that has some special
   * abilities (bidi namely).
   * NOTE: mlterm's author is being asked to 'set' a variable
   * instead of an environment variable due to inheritance.
   */
  if (os_env_exists("MLTERM"))
    set_option_value((char_u *)"tbidi", 1L, NULL, 0);

  didset_options2();

  // enc_locale() will try to find the encoding of the current locale.
  // This will be used when 'default' is used as encoding specifier
  // in 'fileencodings'
  char_u *p = enc_locale();
  if (p == NULL) {
    // use utf-8 as 'default' if locale encoding can't be detected.
    p = vim_strsave((char_u *)"utf-8");
  }
  fenc_default = p;

#ifdef HAVE_WORKING_LIBINTL
  // GNU gettext 0.10.37 supports this feature: set the codeset used for
  // translated messages independently from the current locale.
  (void)bind_textdomain_codeset(PROJECT_NAME, (char *)p_enc);
#endif

  /* Set the default for 'helplang'. */
  set_helplang_default(get_mess_lang());
}

/*
 * Set an option to its default value.
 * This does not take care of side effects!
 */
static void
set_option_default (
    int opt_idx,
    int opt_flags,              /* OPT_FREE, OPT_LOCAL and/or OPT_GLOBAL */
    int compatible              /* use Vi default value */
)
{
  char_u      *varp;            /* pointer to variable for current option */
  int dvi;                      /* index in def_val[] */
  int both = (opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0;

  varp = get_varp_scope(&(options[opt_idx]), both ? OPT_LOCAL : opt_flags);
  uint32_t flags = options[opt_idx].flags;
  if (varp != NULL) {       /* skip hidden option, nothing to do for it */
    dvi = ((flags & P_VI_DEF) || compatible) ? VI_DEFAULT : VIM_DEFAULT;
    if (flags & P_STRING) {
      /* Use set_string_option_direct() for local options to handle
       * freeing and allocating the value. */
      if (options[opt_idx].indir != PV_NONE)
        set_string_option_direct(NULL, opt_idx,
            options[opt_idx].def_val[dvi], opt_flags, 0);
      else {
        if ((opt_flags & OPT_FREE) && (flags & P_ALLOCED))
          free_string_option(*(char_u **)(varp));
        *(char_u **)varp = options[opt_idx].def_val[dvi];
        options[opt_idx].flags &= ~P_ALLOCED;
      }
    } else if (flags & P_NUM) {
      if (options[opt_idx].indir == PV_SCROLL)
        win_comp_scroll(curwin);
      else {
        *(long *)varp = (long)options[opt_idx].def_val[dvi];
        /* May also set global value for local option. */
        if (both)
          *(long *)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL) =
            *(long *)varp;
      }
    } else {    /* P_BOOL */
      *(int *)varp = (int)(intptr_t)options[opt_idx].def_val[dvi];
#ifdef UNIX
      /* 'modeline' defaults to off for root */
      if (options[opt_idx].indir == PV_ML && getuid() == ROOT_UID)
        *(int *)varp = FALSE;
#endif
      /* May also set global value for local option. */
      if (both)
        *(int *)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL) =
          *(int *)varp;
    }

    /* The default value is not insecure. */
    uint32_t *flagsp = insecure_flag(opt_idx, opt_flags);
    *flagsp = *flagsp & ~P_INSECURE;
  }

  set_option_scriptID_idx(opt_idx, opt_flags, current_SID);
}

/*
 * Set all options (except terminal options) to their default value.
 */
static void
set_options_default (
    int opt_flags               /* OPT_FREE, OPT_LOCAL and/or OPT_GLOBAL */
)
{
  for (int i = 0; options[i].fullname; i++) {
    if (!(options[i].flags & P_NODEFAULT)) {
      set_option_default(i, opt_flags, p_cp);
    }
  }

  /* The 'scroll' option must be computed for all windows. */
  FOR_ALL_TAB_WINDOWS(tp, wp) {
    win_comp_scroll(wp);
  }
  parse_cino(curbuf);
}

/// Set the Vi-default value of a string option.
/// Used for 'sh', 'backupskip' and 'term'.
///
/// @param name The name of the option
/// @param val The value of the option
/// @param allocated If true, do not copy default as it was already allocated.
static void set_string_default(const char *name, char *val, bool allocated)
  FUNC_ATTR_NONNULL_ALL
{
  int opt_idx = findoption((char_u *)name);
  if (opt_idx >= 0) {
    if (options[opt_idx].flags & P_DEF_ALLOCED) {
      xfree(options[opt_idx].def_val[VI_DEFAULT]);
    }

    // Take ownership of val when the caller already allocated it;
    // otherwise store a private copy.
    options[opt_idx].def_val[VI_DEFAULT] = (char_u *)(
        allocated
        ? (char_u *)val
        : (char_u *)xstrdup(val));
    options[opt_idx].flags |= P_DEF_ALLOCED;
  }
}

/*
 * Set the Vi-default value of a number option.
 * Used for 'lines' and 'columns'.
 */
void set_number_default(char *name, long val)
{
  int opt_idx;

  opt_idx = findoption((char_u *)name);
  if (opt_idx >= 0)
    options[opt_idx].def_val[VI_DEFAULT] = (char_u *)val;
}

#if defined(EXITFREE)
/*
 * Free all options.
 */
void free_all_options(void)
{
  int i;

  for (i = 0; options[i].fullname; i++) {
    if (options[i].indir == PV_NONE) {
      /* global option: free value and default value. */
      if (options[i].flags & P_ALLOCED && options[i].var != NULL)
        free_string_option(*(char_u **)options[i].var);
      if (options[i].flags & P_DEF_ALLOCED)
        free_string_option(options[i].def_val[VI_DEFAULT]);
    } else if (options[i].var != VAR_WIN && (options[i].flags & P_STRING))
      /* buffer-local option: free global value */
      free_string_option(*(char_u **)options[i].var);
  }
}
#endif


/*
 * Initialize the options, part two: After getting Rows and Columns and
 * setting 'term'.
 */
void set_init_2(void)
{
  int idx;

  /*
   * 'scroll' defaults to half the window height. Note that this default is
   * wrong when the window height changes.
   */
  set_number_default("scroll", Rows / 2);
  idx = findoption((char_u *)"scroll");
  if (idx >= 0 && !(options[idx].flags & P_WAS_SET))
    set_option_default(idx, OPT_LOCAL, p_cp);
  comp_col();

  /*
   * 'window' is only for backwards compatibility with Vi.
   * Default is Rows - 1.
   */
  if (!option_was_set((char_u *)"window"))
    p_window = Rows - 1;
  set_number_default("window", Rows - 1);
  parse_shape_opt(SHAPE_CURSOR);   /* set cursor shapes from 'guicursor' */
  (void)parse_printoptions();      /* parse 'printoptions' default value */
}

/*
 * Initialize the options, part three: After reading the .vimrc
 */
void set_init_3(void)
{
  // Set 'shellpipe' and 'shellredir', depending on the 'shell' option.
  // This is done after other initializations, where 'shell' might have been
  // set, but only if they have not been set before.

  int idx_srr;
  int do_srr;        // true when 'shellredir' still has its default
  int idx_sp;
  int do_sp;         // true when 'shellpipe' still has its default

  idx_srr = findoption((char_u *)"srr");
  if (idx_srr < 0)
    do_srr = FALSE;
  else
    do_srr = !(options[idx_srr].flags & P_WAS_SET);
  idx_sp = findoption((char_u *)"sp");
  if (idx_sp < 0)
    do_sp = FALSE;
  else
    do_sp = !(options[idx_sp].flags & P_WAS_SET);

  // Isolate the executable name from 'shell' to recognize the shell flavor.
  size_t len = 0;
  char_u *p = (char_u *)invocation_path_tail(p_sh, &len);
  p = vim_strnsave(p, len);

  {
    /*
     * Default for p_sp is "| tee", for p_srr is ">".
     * For known shells it is changed here to include stderr.
     */
    if (fnamecmp(p, "csh") == 0
        || fnamecmp(p, "tcsh") == 0
        ) {
      // C-shell family: "|&" redirects both stdout and stderr.
      if (do_sp) {
        p_sp = (char_u *)"|& tee";
        options[idx_sp].def_val[VI_DEFAULT] = p_sp;
      }
      if (do_srr) {
        p_srr = (char_u *)">&";
        options[idx_srr].def_val[VI_DEFAULT] = p_srr;
      }
    } else if (fnamecmp(p, "sh") == 0
               || fnamecmp(p, "ksh") == 0
               || fnamecmp(p, "mksh") == 0
               || fnamecmp(p, "pdksh") == 0
               || fnamecmp(p, "zsh") == 0
               || fnamecmp(p, "zsh-beta") == 0
               || fnamecmp(p, "bash") == 0
               || fnamecmp(p, "fish") == 0
               ) {
      // Bourne-like shells: use "2>&1" to capture stderr as well.
      if (do_sp) {
        p_sp = (char_u *)"2>&1| tee";
        options[idx_sp].def_val[VI_DEFAULT] = p_sp;
      }
      if (do_srr) {
        p_srr = (char_u *)">%s 2>&1";
        options[idx_srr].def_val[VI_DEFAULT] = p_srr;
      }
    }
    xfree(p);
  }

  if (bufempty()) {
    int idx_ffs = findoption((char_u *)"ffs");

    // Apply the first entry of 'fileformats' to the initial buffer.
    if (idx_ffs >= 0 && (options[idx_ffs].flags & P_WAS_SET)) {
      set_fileformat(default_fileformat(), OPT_LOCAL);
    }
  }

  set_title_defaults();
}

/*
 * When 'helplang' is still at its default value, set it to "lang".
 * Only the first two characters of "lang" are used.
 */
void set_helplang_default(const char *lang)
{
  int idx;

  if (lang == NULL || STRLEN(lang) < 2)         /* safety check */
    return;
  idx = findoption((char_u *)"hlg");
  if (idx >= 0 && !(options[idx].flags & P_WAS_SET)) {
    if (options[idx].flags & P_ALLOCED)
      free_string_option(p_hlg);
    p_hlg = (char_u *)xstrdup(lang);
    /* zh_CN becomes "cn", zh_TW becomes "tw". */
    if (STRNICMP(p_hlg, "zh_", 3) == 0 && STRLEN(p_hlg) >= 5) {
      p_hlg[0] = (char_u)TOLOWER_ASC(p_hlg[3]);
      p_hlg[1] = (char_u)TOLOWER_ASC(p_hlg[4]);
    }
    p_hlg[2] = NUL;
    options[idx].flags |= P_ALLOCED;
  }
}

/*
 * 'title' and 'icon' only default to true if they have not been set or reset
 * in .vimrc and we can read the old value.
 * When 'title' and 'icon' have been reset in .vimrc, we won't even check if
 * they can be reset.  This reduces startup time when using X on a remote
 * machine.
 */
void set_title_defaults(void)
{
  int idx1;

  /*
   * If GUI is (going to be) used, we can always set the window title and
   * icon name.  Saves a bit of time, because the X11 display server does
   * not need to be contacted.
   */
  idx1 = findoption((char_u *)"title");
  if (idx1 >= 0 && !(options[idx1].flags & P_WAS_SET)) {
    options[idx1].def_val[VI_DEFAULT] = (char_u *)(intptr_t)0;
    p_title = 0;
  }
  idx1 = findoption((char_u *)"icon");
  if (idx1 >= 0 && !(options[idx1].flags & P_WAS_SET)) {
    options[idx1].def_val[VI_DEFAULT] = (char_u *)(intptr_t)0;
    p_icon = 0;
  }
}

/*
 * Parse 'arg' for option settings.
 *
 * 'arg' may be IObuff, but only when no errors can be present and option
 * does not need to be expanded with option_expand().
* "opt_flags": * 0 for ":set" * OPT_GLOBAL for ":setglobal" * OPT_LOCAL for ":setlocal" and a modeline * OPT_MODELINE for a modeline * OPT_WINONLY to only set window-local options * OPT_NOWIN to skip setting window-local options * * returns FAIL if an error is detected, OK otherwise */ int do_set ( char_u *arg, /* option string (may be written to!) */ int opt_flags ) { int opt_idx; char_u *errmsg; char_u errbuf[80]; char_u *startarg; int prefix; /* 1: nothing, 0: "no", 2: "inv" in front of name */ char_u nextchar; /* next non-white char after option name */ int afterchar; /* character just after option name */ int len; int i; long value; int key; uint32_t flags; /* flags for current option */ char_u *varp = NULL; /* pointer to variable for current option */ int did_show = FALSE; /* already showed one value */ int adding; /* "opt+=arg" */ int prepending; /* "opt^=arg" */ int removing; /* "opt-=arg" */ int cp_val = 0; if (*arg == NUL) { showoptions(0, opt_flags); did_show = TRUE; goto theend; } while (*arg != NUL) { /* loop to process all options */ errmsg = NULL; startarg = arg; /* remember for error message */ if (STRNCMP(arg, "all", 3) == 0 && !isalpha(arg[3]) && !(opt_flags & OPT_MODELINE)) { /* * ":set all" show all options. * ":set all&" set all options to their default value. */ arg += 3; if (*arg == '&') { arg++; // Only for :set command set global value of local options. 
set_options_default(OPT_FREE | opt_flags); didset_options(); didset_options2(); redraw_all_later(CLEAR); } else { showoptions(1, opt_flags); did_show = TRUE; } } else if (STRNCMP(arg, "termcap", 7) == 0 && !(opt_flags & OPT_MODELINE)) { did_show = TRUE; arg += 7; } else { prefix = 1; if (STRNCMP(arg, "no", 2) == 0) { prefix = 0; arg += 2; } else if (STRNCMP(arg, "inv", 3) == 0) { prefix = 2; arg += 3; } /* find end of name */ key = 0; if (*arg == '<') { opt_idx = -1; /* look out for <t_>;> */ if (arg[1] == 't' && arg[2] == '_' && arg[3] && arg[4]) len = 5; else { len = 1; while (arg[len] != NUL && arg[len] != '>') ++len; } if (arg[len] != '>') { errmsg = e_invarg; goto skip; } if (arg[1] == 't' && arg[2] == '_') { // could be term code opt_idx = findoption_len(arg + 1, (size_t) (len - 1)); } len++; if (opt_idx == -1) { key = find_key_option(arg + 1); } } else { len = 0; // The two characters after "t_" may not be alphanumeric. if (arg[0] == 't' && arg[1] == '_' && arg[2] && arg[3]) { len = 4; } else { while (ASCII_ISALNUM(arg[len]) || arg[len] == '_') { len++; } } opt_idx = findoption_len(arg, (size_t) len); if (opt_idx == -1) { key = find_key_option(arg); } } /* remember character after option name */ afterchar = arg[len]; /* skip white space, allow ":set ai ?" */ while (ascii_iswhite(arg[len])) ++len; adding = FALSE; prepending = FALSE; removing = FALSE; if (arg[len] != NUL && arg[len + 1] == '=') { if (arg[len] == '+') { adding = TRUE; /* "+=" */ ++len; } else if (arg[len] == '^') { prepending = TRUE; /* "^=" */ ++len; } else if (arg[len] == '-') { removing = TRUE; /* "-=" */ ++len; } } nextchar = arg[len]; if (opt_idx == -1 && key == 0) { /* found a mismatch: skip */ errmsg = (char_u *)N_("E518: Unknown option"); goto skip; } if (opt_idx >= 0) { if (options[opt_idx].var == NULL) { /* hidden option: skip */ /* Only give an error message when requesting the value of * a hidden option, ignore setting it. 
*/ if (vim_strchr((char_u *)"=:!&<", nextchar) == NULL && (!(options[opt_idx].flags & P_BOOL) || nextchar == '?')) errmsg = (char_u *)_(e_unsupportedoption); goto skip; } flags = options[opt_idx].flags; varp = get_varp_scope(&(options[opt_idx]), opt_flags); } else { flags = P_STRING; } /* Skip all options that are not window-local (used when showing * an already loaded buffer in a window). */ if ((opt_flags & OPT_WINONLY) && (opt_idx < 0 || options[opt_idx].var != VAR_WIN)) goto skip; /* Skip all options that are window-local (used for :vimgrep). */ if ((opt_flags & OPT_NOWIN) && opt_idx >= 0 && options[opt_idx].var == VAR_WIN) goto skip; /* Disallow changing some options from modelines. */ if (opt_flags & OPT_MODELINE) { if (flags & (P_SECURE | P_NO_ML)) { errmsg = (char_u *)_("E520: Not allowed in a modeline"); goto skip; } /* In diff mode some options are overruled. This avoids that * 'foldmethod' becomes "marker" instead of "diff" and that * "wrap" gets set. */ if (curwin->w_p_diff && opt_idx >= 0 /* shut up coverity warning */ && (options[opt_idx].indir == PV_FDM || options[opt_idx].indir == PV_WRAP)) goto skip; } /* Disallow changing some options in the sandbox */ if (sandbox != 0 && (flags & P_SECURE)) { errmsg = (char_u *)_(e_sandbox); goto skip; } if (vim_strchr((char_u *)"?=:!&<", nextchar) != NULL) { arg += len; cp_val = p_cp; if (nextchar == '&' && arg[1] == 'v' && arg[2] == 'i') { if (arg[3] == 'm') { /* "opt&vim": set to Vim default */ cp_val = FALSE; arg += 3; } else { /* "opt&vi": set to Vi default */ cp_val = TRUE; arg += 2; } } if (vim_strchr((char_u *)"?!&<", nextchar) != NULL && arg[1] != NUL && !ascii_iswhite(arg[1])) { errmsg = e_trailing; goto skip; } } /* * allow '=' and ':' as MSDOS command.com allows only one * '=' character per "set" command line. grrr. (jw) */ if (nextchar == '?' 
|| (prefix == 1 && vim_strchr((char_u *)"=:&<", nextchar) == NULL && !(flags & P_BOOL))) { /* * print value */ if (did_show) msg_putchar('\n'); /* cursor below last one */ else { gotocmdline(TRUE); /* cursor at status line */ did_show = TRUE; /* remember that we did a line */ } if (opt_idx >= 0) { showoneopt(&options[opt_idx], opt_flags); if (p_verbose > 0) { /* Mention where the option was last set. */ if (varp == options[opt_idx].var) last_set_msg(options[opt_idx].scriptID); else if ((int)options[opt_idx].indir & PV_WIN) last_set_msg(curwin->w_p_scriptID[ (int)options[opt_idx].indir & PV_MASK]); else if ((int)options[opt_idx].indir & PV_BUF) last_set_msg(curbuf->b_p_scriptID[ (int)options[opt_idx].indir & PV_MASK]); } } else { errmsg = (char_u *)N_("E846: Key code not set"); goto skip; } if (nextchar != '?' && nextchar != NUL && !ascii_iswhite(afterchar)) errmsg = e_trailing; } else { if (flags & P_BOOL) { /* boolean */ if (nextchar == '=' || nextchar == ':') { errmsg = e_invarg; goto skip; } /* * ":set opt!": invert * ":set opt&": reset to default value * ":set opt<": reset to global value */ if (nextchar == '!') value = *(int *)(varp) ^ 1; else if (nextchar == '&') value = (int)(intptr_t)options[opt_idx].def_val[ ((flags & P_VI_DEF) || cp_val) ? VI_DEFAULT : VIM_DEFAULT]; else if (nextchar == '<') { /* For 'autoread' -1 means to use global value. 
*/ if ((int *)varp == &curbuf->b_p_ar && opt_flags == OPT_LOCAL) value = -1; else value = *(int *)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL); } else { /* * ":set invopt": invert * ":set opt" or ":set noopt": set or reset */ if (nextchar != NUL && !ascii_iswhite(afterchar)) { errmsg = e_trailing; goto skip; } if (prefix == 2) /* inv */ value = *(int *)(varp) ^ 1; else value = prefix; } errmsg = set_bool_option(opt_idx, varp, (int)value, opt_flags); } else { /* numeric or string */ if (vim_strchr((char_u *)"=:&<", nextchar) == NULL || prefix != 1) { errmsg = e_invarg; goto skip; } if (flags & P_NUM) { /* numeric */ /* * Different ways to set a number option: * & set to default value * < set to global value * <xx> accept special key codes for 'wildchar' * c accept any non-digit for 'wildchar' * [-]0-9 set number * other error */ ++arg; if (nextchar == '&') value = (long)options[opt_idx].def_val[ ((flags & P_VI_DEF) || cp_val) ? VI_DEFAULT : VIM_DEFAULT]; else if (nextchar == '<') { /* For 'undolevels' NO_LOCAL_UNDOLEVEL means to * use the global value. */ if ((long *)varp == &curbuf->b_p_ul && opt_flags == OPT_LOCAL) value = NO_LOCAL_UNDOLEVEL; else value = *(long *)get_varp_scope( &(options[opt_idx]), OPT_GLOBAL); } else if (((long *)varp == &p_wc || (long *)varp == &p_wcm) && (*arg == '<' || *arg == '^' || ((!arg[1] || ascii_iswhite(arg[1])) && !ascii_isdigit(*arg)))) { value = string_to_key(arg); if (value == 0 && (long *)varp != &p_wcm) { errmsg = e_invarg; goto skip; } } else if (*arg == '-' || ascii_isdigit(*arg)) { // Allow negative (for 'undolevels'), octal and // hex numbers. 
vim_str2nr(arg, NULL, &i, STR2NR_ALL, &value, NULL, 0); if (arg[i] != NUL && !ascii_iswhite(arg[i])) { errmsg = e_invarg; goto skip; } } else { errmsg = (char_u *)N_("E521: Number required after ="); goto skip; } if (adding) value = *(long *)varp + value; if (prepending) value = *(long *)varp * value; if (removing) value = *(long *)varp - value; errmsg = set_num_option(opt_idx, varp, value, errbuf, sizeof(errbuf), opt_flags); } else if (opt_idx >= 0) { /* string */ char_u *save_arg = NULL; char_u *s = NULL; char_u *oldval = NULL; // previous value if *varp char_u *newval; char_u *origval = NULL; char *saved_origval = NULL; unsigned newlen; int comma; int bs; int new_value_alloced; /* new string option was allocated */ /* When using ":set opt=val" for a global option * with a local value the local value will be * reset, use the global value here. */ if ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0 && ((int)options[opt_idx].indir & PV_BOTH)) varp = options[opt_idx].var; /* The old value is kept until we are sure that the * new value is valid. */ oldval = *(char_u **)varp; if (nextchar == '&') { /* set to default val */ newval = options[opt_idx].def_val[ ((flags & P_VI_DEF) || cp_val) ? VI_DEFAULT : VIM_DEFAULT]; /* expand environment variables and ~ (since the * default value was already expanded, only * required when an environment variable was set * later */ new_value_alloced = true; if (newval == NULL) { newval = empty_option; } else if (!(options[opt_idx].flags | P_NO_DEF_EXP)) { s = option_expand(opt_idx, newval); if (s == NULL) { s = newval; } newval = vim_strsave(s); } else { newval = (char_u *)xstrdup((char *)newval); } } else if (nextchar == '<') { // set to global val newval = vim_strsave(*(char_u **)get_varp_scope( &(options[opt_idx]), OPT_GLOBAL)); new_value_alloced = TRUE; } else { ++arg; /* jump to after the '=' or ':' */ /* * Set 'keywordprg' to ":help" if an empty * value was passed to :set by the user. * Misuse errbuf[] for the resulting string. 
*/ if (varp == (char_u *)&p_kp && (*arg == NUL || *arg == ' ')) { STRCPY(errbuf, ":help"); save_arg = arg; arg = errbuf; } /* * Convert 'backspace' number to string, for * adding, prepending and removing string. */ else if (varp == (char_u *)&p_bs && ascii_isdigit(**(char_u **)varp)) { i = getdigits_int((char_u **)varp); switch (i) { case 0: *(char_u **)varp = empty_option; break; case 1: *(char_u **)varp = vim_strsave( (char_u *)"indent,eol"); break; case 2: *(char_u **)varp = vim_strsave( (char_u *)"indent,eol,start"); break; } xfree(oldval); oldval = *(char_u **)varp; } /* * Convert 'whichwrap' number to string, for * backwards compatibility with Vim 3.0. * Misuse errbuf[] for the resulting string. */ else if (varp == (char_u *)&p_ww && ascii_isdigit(*arg)) { *errbuf = NUL; i = getdigits_int(&arg); if (i & 1) STRCAT(errbuf, "b,"); if (i & 2) STRCAT(errbuf, "s,"); if (i & 4) STRCAT(errbuf, "h,l,"); if (i & 8) STRCAT(errbuf, "<,>,"); if (i & 16) STRCAT(errbuf, "[,],"); if (*errbuf != NUL) /* remove trailing , */ errbuf[STRLEN(errbuf) - 1] = NUL; save_arg = arg; arg = errbuf; } /* * Remove '>' before 'dir' and 'bdir', for * backwards compatibility with version 3.0 */ else if ( *arg == '>' && (varp == (char_u *)&p_dir || varp == (char_u *)&p_bdir)) { ++arg; } /* When setting the local value of a global * option, the old value may be the global value. */ if (((int)options[opt_idx].indir & PV_BOTH) && (opt_flags & OPT_LOCAL)) origval = *(char_u **)get_varp( &options[opt_idx]); else origval = oldval; /* * Copy the new string into allocated memory. * Can't use set_string_option_direct(), because * we need to remove the backslashes. */ /* get a bit too much */ newlen = (unsigned)STRLEN(arg) + 1; if (adding || prepending || removing) newlen += (unsigned)STRLEN(origval) + 1; newval = xmalloc(newlen); s = newval; /* * Copy the string, skip over escaped chars. 
* For WIN32 backslashes before normal * file name characters are not removed, and keep * backslash at start, for "\\machine\path", but * do remove it for "\\\\machine\\path". * The reverse is found in ExpandOldSetting(). */ while (*arg && !ascii_iswhite(*arg)) { if (*arg == '\\' && arg[1] != NUL #ifdef BACKSLASH_IN_FILENAME && !((flags & P_EXPAND) && vim_isfilec(arg[1]) && (arg[1] != '\\' || (s == newval && arg[2] != '\\'))) #endif ) ++arg; /* remove backslash */ if (has_mbyte && (i = (*mb_ptr2len)(arg)) > 1) { /* copy multibyte char */ memmove(s, arg, (size_t)i); arg += i; s += i; } else *s++ = *arg++; } *s = NUL; /* * Expand environment variables and ~. * Don't do it when adding without inserting a * comma. */ if (!(adding || prepending || removing) || (flags & P_COMMA)) { s = option_expand(opt_idx, newval); if (s != NULL) { xfree(newval); newlen = (unsigned)STRLEN(s) + 1; if (adding || prepending || removing) newlen += (unsigned)STRLEN(origval) + 1; newval = xmalloc(newlen); STRCPY(newval, s); } } /* locate newval[] in origval[] when removing it * and when adding to avoid duplicates */ i = 0; /* init for GCC */ if (removing || (flags & P_NODUP)) { i = (int)STRLEN(newval); bs = 0; for (s = origval; *s; ++s) { if ((!(flags & P_COMMA) || s == origval || (s[-1] == ',' && !(bs & 1))) && STRNCMP(s, newval, i) == 0 && (!(flags & P_COMMA) || s[i] == ',' || s[i] == NUL)) { break; } // Count backslashes. 
Only a comma with an even number of // backslashes or a single backslash preceded by a comma // before it is recognized as a separator if ((s > origval + 1 && s[-1] == '\\' && s[-2] != ',') || (s == origval + 1 && s[-1] == '\\')) { bs++; } else { bs = 0; } } // do not add if already there if ((adding || prepending) && *s) { prepending = FALSE; adding = FALSE; STRCPY(newval, origval); } } /* concatenate the two strings; add a ',' if * needed */ if (adding || prepending) { comma = ((flags & P_COMMA) && *origval != NUL && *newval != NUL); if (adding) { i = (int)STRLEN(origval); // Strip a trailing comma, would get 2. if (comma && i > 1 && (flags & P_ONECOMMA) == P_ONECOMMA && origval[i - 1] == ',' && origval[i - 2] != '\\') { i--; } memmove(newval + i + comma, newval, STRLEN(newval) + 1); memmove(newval, origval, (size_t)i); } else { i = (int)STRLEN(newval); STRMOVE(newval + i + comma, origval); } if (comma) newval[i] = ','; } /* Remove newval[] from origval[]. (Note: "i" has * been set above and is used here). */ if (removing) { STRCPY(newval, origval); if (*s) { /* may need to remove a comma */ if (flags & P_COMMA) { if (s == origval) { /* include comma after string */ if (s[i] == ',') ++i; } else { /* include comma before string */ --s; ++i; } } STRMOVE(newval + (s - origval), s + i); } } if (flags & P_FLAGLIST) { // Remove flags that appear twice. for (s = newval; *s; s++) { // if options have P_FLAGLIST and P_ONECOMMA such as // 'whichwrap' if (flags & P_ONECOMMA) { if (*s != ',' && *(s + 1) == ',' && vim_strchr(s + 2, *s) != NULL) { // Remove the duplicated value and the next comma. STRMOVE(s, s + 2); s -= 2; } } else { if ((!(flags & P_COMMA) || *s != ',') && vim_strchr(s + 1, *s) != NULL) { STRMOVE(s, s + 1); s--; } } } } if (save_arg != NULL) /* number for 'whichwrap' */ arg = save_arg; new_value_alloced = TRUE; } /* Set the new value. 
*/ *(char_u **)(varp) = newval; if (!starting && origval != NULL) { // origval may be freed by // did_set_string_option(), make a copy. saved_origval = xstrdup((char *) origval); } /* Handle side effects, and set the global value for * ":set" on local options. */ errmsg = did_set_string_option(opt_idx, (char_u **)varp, new_value_alloced, oldval, errbuf, opt_flags); // If error detected, print the error message. if (errmsg != NULL) { xfree(saved_origval); goto skip; } if (saved_origval != NULL) { char buf_type[7]; vim_snprintf(buf_type, ARRAY_SIZE(buf_type), "%s", (opt_flags & OPT_LOCAL) ? "local" : "global"); set_vim_var_string(VV_OPTION_NEW, *(char **) varp, -1); set_vim_var_string(VV_OPTION_OLD, saved_origval, -1); set_vim_var_string(VV_OPTION_TYPE, buf_type, -1); apply_autocmds(EVENT_OPTIONSET, (char_u *)options[opt_idx].fullname, NULL, false, NULL); reset_v_option_vars(); xfree(saved_origval); } } else { // key code option(FIXME(tarruda): Show a warning or something // similar) } } if (opt_idx >= 0) did_set_option(opt_idx, opt_flags, !prepending && !adding && !removing); } skip: /* * Advance to next argument. 
* - skip until a blank found, taking care of backslashes * - skip blanks * - skip one "=val" argument (for hidden options ":set gfn =xx") */ for (i = 0; i < 2; ++i) { while (*arg != NUL && !ascii_iswhite(*arg)) if (*arg++ == '\\' && *arg != NUL) ++arg; arg = skipwhite(arg); if (*arg != '=') break; } } if (errmsg != NULL) { STRLCPY(IObuff, _(errmsg), IOSIZE); i = (int)STRLEN(IObuff) + 2; if (i + (arg - startarg) < IOSIZE) { /* append the argument with the error */ STRCAT(IObuff, ": "); assert(arg >= startarg); memmove(IObuff + i, startarg, (size_t)(arg - startarg)); IObuff[i + (arg - startarg)] = NUL; } /* make sure all characters are printable */ trans_characters(IObuff, IOSIZE); ++no_wait_return; /* wait_return done later */ emsg(IObuff); /* show error highlighted */ --no_wait_return; return FAIL; } arg = skipwhite(arg); } theend: if (silent_mode && did_show) { /* After displaying option values in silent mode. */ silent_mode = FALSE; info_message = TRUE; /* use mch_msg(), not mch_errmsg() */ msg_putchar('\n'); ui_flush(); silent_mode = TRUE; info_message = FALSE; /* use mch_msg(), not mch_errmsg() */ } return OK; } /* * Call this when an option has been given a new value through a user command. * Sets the P_WAS_SET flag and takes care of the P_INSECURE flag. */ static void did_set_option ( int opt_idx, int opt_flags, /* possibly with OPT_MODELINE */ int new_value /* value was replaced completely */ ) { options[opt_idx].flags |= P_WAS_SET; /* When an option is set in the sandbox, from a modeline or in secure mode * set the P_INSECURE flag. Otherwise, if a new value is stored reset the * flag. 
   */
  uint32_t *p = insecure_flag(opt_idx, opt_flags);

  if (secure || sandbox != 0
      || (opt_flags & OPT_MODELINE))
    *p = *p | P_INSECURE;
  else if (new_value)
    *p = *p & ~P_INSECURE;
}

/*
 * Format an "E539: Illegal character" message into "errbuf".
 * Returns "errbuf", or "" when "errbuf" is NULL.
 */
static char_u *illegal_char(char_u *errbuf, int c)
{
  if (errbuf == NULL)
    return (char_u *)"";
  sprintf((char *)errbuf, _("E539: Illegal character <%s>"),
      (char *)transchar(c));
  return errbuf;
}

/*
 * Convert a key name or string into a key value.
 * Used for 'wildchar' and 'cedit' options.
 */
static int string_to_key(char_u *arg)
{
  if (*arg == '<')              /* e.g. "<Tab>": look up key name */
    return find_key_option(arg + 1);
  if (*arg == '^')              /* e.g. "^F": CTRL character */
    return Ctrl_chr(arg[1]);
  return *arg;
}

/*
 * Check value of 'cedit' and set cedit_key.
 * Returns NULL if value is OK, error message otherwise.
 */
static char_u *check_cedit(void)
{
  int n;

  if (*p_cedit == NUL)
    cedit_key = -1;
  else {
    n = string_to_key(p_cedit);
    /* 'cedit' must be a non-printable key. */
    if (vim_isprintc(n))
      return e_invarg;
    cedit_key = n;
  }
  return NULL;
}

/*
 * When changing 'title', 'titlestring', 'icon' or 'iconstring', call
 * maketitle() to create and display it.
 * When switching the title or icon off, call ui_set_{icon,title}(NULL) to get
 * the old value back.
 */
static void did_set_title (
    int icon                    /* Did set icon instead of title */
)
{
  if (starting != NO_SCREEN) {
    maketitle();
    if (icon) {
      if (!p_icon) {
        ui_set_icon(NULL);
      }
    } else {
      if (!p_title) {
        ui_set_title(NULL);
      }
    }
  }
}

/*
 * set_options_bin -  called when 'bin' changes value.
 */
void set_options_bin (
    int oldval,
    int newval,
    int opt_flags                   /* OPT_LOCAL and/or OPT_GLOBAL */
)
{
  /*
   * The option values that are changed when 'bin' changes are
   * copied when 'bin' is set and restored when 'bin' is reset.
   */
  if (newval) {
    if (!oldval) {              /* switched on: save the non-binary values */
      if (!(opt_flags & OPT_GLOBAL)) {
        curbuf->b_p_tw_nobin = curbuf->b_p_tw;
        curbuf->b_p_wm_nobin = curbuf->b_p_wm;
        curbuf->b_p_ml_nobin = curbuf->b_p_ml;
        curbuf->b_p_et_nobin = curbuf->b_p_et;
      }
      if (!(opt_flags & OPT_LOCAL)) {
        p_tw_nobin = p_tw;
        p_wm_nobin = p_wm;
        p_ml_nobin = p_ml;
        p_et_nobin = p_et;
      }
    }

    if (!(opt_flags & OPT_GLOBAL)) {
      curbuf->b_p_tw = 0;       /* no automatic line wrap */
      curbuf->b_p_wm = 0;       /* no automatic line wrap */
      curbuf->b_p_ml = 0;       /* no modelines */
      curbuf->b_p_et = 0;       /* no expandtab */
    }
    if (!(opt_flags & OPT_LOCAL)) {
      p_tw = 0;
      p_wm = 0;
      p_ml = FALSE;
      p_et = FALSE;
      p_bin = TRUE;             /* needed when called for the "-b" argument */
    }
  } else if (oldval) {          /* switched off: restore the saved values */
    if (!(opt_flags & OPT_GLOBAL)) {
      curbuf->b_p_tw = curbuf->b_p_tw_nobin;
      curbuf->b_p_wm = curbuf->b_p_wm_nobin;
      curbuf->b_p_ml = curbuf->b_p_ml_nobin;
      curbuf->b_p_et = curbuf->b_p_et_nobin;
    }
    if (!(opt_flags & OPT_LOCAL)) {
      p_tw = p_tw_nobin;
      p_wm = p_wm_nobin;
      p_ml = p_ml_nobin;
      p_et = p_et_nobin;
    }
  }
}

/*
 * Find the parameter represented by the given character (eg ', :, ", or /),
 * and return its associated value in the 'shada' string.
 * Only works for number parameters, not for 'r' or 'n'.
 * If the parameter is not specified in the string or there is no following
 * number, return -1.
 */
int get_shada_parameter(int type)
{
  char_u *p;

  p = find_shada_parameter(type);
  if (p != NULL && ascii_isdigit(*p))
    return atoi((char *)p);
  return -1;
}

/*
 * Find the parameter represented by the given character (eg ''', ':', '"', or
 * '/') in the 'shada' option and return a pointer to the string after it.
 * Return NULL if the parameter is not specified in the string.
 */
char_u *find_shada_parameter(int type)
{
  char_u *p;

  for (p = p_shada; *p; ++p) {
    if (*p == type)
      return p + 1;
    if (*p == 'n')                  /* 'n' is always the last one */
      break;
    p = vim_strchr(p, ',');         /* skip until next ',' */
    if (p == NULL)                  /* hit the end without finding parameter */
      break;
  }
  return NULL;
}

/*
 * Expand environment variables for some string options.
 * These string options cannot be indirect!
 * If "val" is NULL expand the current value of the option.
 * Return pointer to NameBuff, or NULL when not expanded.
 */
static char_u *option_expand(int opt_idx, char_u *val)
{
  /* if option doesn't need expansion nothing to do */
  if (!(options[opt_idx].flags & P_EXPAND) || options[opt_idx].var == NULL)
    return NULL;

  if (val == NULL) {
    val = *(char_u **)options[opt_idx].var;
  }

  // If val is longer than MAXPATHL no meaningful expansion can be done,
  // expand_env() would truncate the string.
  if (val == NULL || STRLEN(val) > MAXPATHL) {
    return NULL;
  }

  /*
   * Expanding this with NameBuff, expand_env() must not be passed IObuff.
   * Escape spaces when expanding 'tags', they are used to separate file
   * names.
   * For 'spellsuggest' expand after "file:".
   */
  expand_env_esc(val, NameBuff, MAXPATHL,
      (char_u **)options[opt_idx].var == &p_tags, FALSE,
      (char_u **)options[opt_idx].var == &p_sps ? (char_u *)"file:" :
      NULL);
  if (STRCMP(NameBuff, val) == 0)   /* they are the same */
    return NULL;

  return NameBuff;
}

/*
 * After setting various option values: recompute variables that depend on
 * option values.
 */
static void didset_options(void)
{
  /* initialize the table for 'iskeyword' et.al.
   */
  (void)init_chartab();

  /* Re-parse the comma-separated flag options into their bitmask caches. */
  (void)opt_strings_flags(p_cmp, p_cmp_values, &cmp_flags, true);
  (void)opt_strings_flags(p_bkc, p_bkc_values, &bkc_flags, true);
  (void)opt_strings_flags(p_bo, p_bo_values, &bo_flags, true);
  (void)opt_strings_flags(p_ssop, p_ssop_values, &ssop_flags, true);
  (void)opt_strings_flags(p_vop, p_ssop_values, &vop_flags, true);
  (void)opt_strings_flags(p_fdo, p_fdo_values, &fdo_flags, true);
  (void)opt_strings_flags(p_dy, p_dy_values, &dy_flags, true);
  (void)opt_strings_flags(p_tc, p_tc_values, &tc_flags, false);
  (void)opt_strings_flags(p_ve, p_ve_values, &ve_flags, true);
  (void)spell_check_msm();
  (void)spell_check_sps();
  (void)compile_cap_prog(curwin->w_s);
  (void)did_set_spell_option(true);
  // set cedit_key
  (void)check_cedit();
  briopt_check(curwin);
  // initialize the table for 'breakat'.
  fill_breakat_flags();
}

// More side effects of setting options.
static void didset_options2(void)
{
  // Initialize the highlight_attr[] table.
  (void)highlight_changed();

  // Parse default for 'clipboard'.
  (void)opt_strings_flags(p_cb, p_cb_values, &cb_flags, true);

  // Parse default for 'fillchars'.
  (void)set_chars_option(&p_fcs);

  // Parse default for 'listchars'.
  (void)set_chars_option(&p_lcs);

  // Parse default for 'wildmode'.
  check_opt_wim();
}

/*
 * Check for string options that are NULL (normally only termcap options).
 */
void check_options(void)
{
  int opt_idx;

  for (opt_idx = 0; options[opt_idx].fullname != NULL; opt_idx++)
    if ((options[opt_idx].flags & P_STRING) && options[opt_idx].var != NULL)
      check_string_option((char_u **)get_varp(&(options[opt_idx])));
}

/*
 * Check string options in a buffer for NULL value.
 */
void check_buf_options(buf_T *buf)
{
  /* Replace any NULL buffer-local string option with empty_option, so the
   * rest of the code never has to handle a NULL option value. */
  check_string_option(&buf->b_p_bh);
  check_string_option(&buf->b_p_bt);
  check_string_option(&buf->b_p_fenc);
  check_string_option(&buf->b_p_ff);
  check_string_option(&buf->b_p_def);
  check_string_option(&buf->b_p_inc);
  check_string_option(&buf->b_p_inex);
  check_string_option(&buf->b_p_inde);
  check_string_option(&buf->b_p_indk);
  check_string_option(&buf->b_p_fex);
  check_string_option(&buf->b_p_kp);
  check_string_option(&buf->b_p_mps);
  check_string_option(&buf->b_p_fo);
  check_string_option(&buf->b_p_flp);
  check_string_option(&buf->b_p_isk);
  check_string_option(&buf->b_p_com);
  check_string_option(&buf->b_p_cms);
  check_string_option(&buf->b_p_nf);
  check_string_option(&buf->b_p_qe);
  check_string_option(&buf->b_p_syn);
  check_string_option(&buf->b_s.b_syn_isk);
  check_string_option(&buf->b_s.b_p_spc);
  check_string_option(&buf->b_s.b_p_spf);
  check_string_option(&buf->b_s.b_p_spl);
  check_string_option(&buf->b_p_sua);
  check_string_option(&buf->b_p_cink);
  check_string_option(&buf->b_p_cino);
  parse_cino(buf);
  check_string_option(&buf->b_p_ft);
  check_string_option(&buf->b_p_cinw);
  check_string_option(&buf->b_p_cpt);
  check_string_option(&buf->b_p_cfu);
  check_string_option(&buf->b_p_ofu);
  check_string_option(&buf->b_p_keymap);
  check_string_option(&buf->b_p_gp);
  check_string_option(&buf->b_p_mp);
  check_string_option(&buf->b_p_efm);
  check_string_option(&buf->b_p_ep);
  check_string_option(&buf->b_p_path);
  check_string_option(&buf->b_p_tags);
  check_string_option(&buf->b_p_tc);
  check_string_option(&buf->b_p_dict);
  check_string_option(&buf->b_p_tsr);
  check_string_option(&buf->b_p_lw);
  check_string_option(&buf->b_p_bkc);
}

/*
 * Free the string allocated for an option.
 * Checks for the string being empty_option.  This may happen if we're out of
 * memory, vim_strsave() returned NULL, which was replaced by empty_option by
 * check_options().
 * Does NOT check for P_ALLOCED flag!
 */
void free_string_option(char_u *p)
{
  if (p != empty_option)
    xfree(p);
}

/* Free a string option's value and reset it to the shared empty_option. */
void clear_string_option(char_u **pp)
{
  if (*pp != empty_option)
    xfree(*pp);
  *pp = empty_option;
}

/* Replace a NULL string option value with the shared empty_option. */
static void check_string_option(char_u **pp)
{
  if (*pp == NULL)
    *pp = empty_option;
}

/*
 * Return TRUE when option "opt" was set from a modeline or in secure mode.
 * Return FALSE when it wasn't.
 * Return -1 for an unknown option.
 */
int was_set_insecurely(char_u *opt, int opt_flags)
{
  int idx = findoption(opt);

  if (idx >= 0) {
    uint32_t *flagp = insecure_flag(idx, opt_flags);
    return (*flagp & P_INSECURE) != 0;
  }
  EMSG2(_(e_intern2), "was_set_insecurely()");
  return -1;
}

/*
 * Get a pointer to the flags used for the P_INSECURE flag of option
 * "opt_idx".  For some local options a local flags field is used.
 */
static uint32_t *insecure_flag(int opt_idx, int opt_flags)
{
  if (opt_flags & OPT_LOCAL)
    switch ((int)options[opt_idx].indir) {
    /* Expression-valued local options keep per-window/per-buffer flags. */
    case PV_STL:        return &curwin->w_p_stl_flags;
    case PV_FDE:        return &curwin->w_p_fde_flags;
    case PV_FDT:        return &curwin->w_p_fdt_flags;
    case PV_INDE:       return &curbuf->b_p_inde_flags;
    case PV_FEX:        return &curbuf->b_p_fex_flags;
    case PV_INEX:       return &curbuf->b_p_inex_flags;
    }

  /* Nothing special, return global flags field. */
  return &options[opt_idx].flags;
}

/*
 * Redraw the window title and/or tab page text later.
 */
static void redraw_titles(void)
{
  need_maketitle = TRUE;
  redraw_tabline = TRUE;
}

/* Cached options[] index of 'shada'; -1 until first looked up. */
static int shada_idx = -1;

/*
 * Set a string option to a new value (without checking the effect).
 * The string is copied into allocated memory.
 * if ("opt_idx" == -1) "name" is used, otherwise "opt_idx" is used.
 * When "set_sid" is zero set the scriptID to current_SID.  When "set_sid" is
 * SID_NONE don't set the scriptID.  Otherwise set the scriptID to "set_sid".
 */
void set_string_option_direct (
    char_u *name,
    int opt_idx,
    char_u *val,
    int opt_flags,                  /* OPT_FREE, OPT_LOCAL and/or OPT_GLOBAL */
    int set_sid
)
{
  char_u      *s;
  char_u      **varp;
  int both = (opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0;
  int idx = opt_idx;

  if (idx == -1) {              /* use name */
    idx = findoption(name);
    if (idx < 0) {              /* not found (should not happen) */
      EMSG2(_(e_intern2), "set_string_option_direct()");
      EMSG2(_("For option %s"), name);
      return;
    }
  }

  if (options[idx].var == NULL)         /* can't set hidden option */
    return;

  assert((void *) options[idx].var != (void *) &p_shada);

  s = vim_strsave(val);
  {
    varp = (char_u **)get_varp_scope(&(options[idx]),
        both ? OPT_LOCAL : opt_flags);
    if ((opt_flags & OPT_FREE) && (options[idx].flags & P_ALLOCED))
      free_string_option(*varp);
    *varp = s;

    /* For buffer/window local option may also set the global value. */
    if (both)
      set_string_option_global(idx, varp);

    options[idx].flags |= P_ALLOCED;

    /* When setting both values of a global option with a local value,
     * make the local value empty, so that the global value is used. */
    if (((int)options[idx].indir & PV_BOTH) && both) {
      free_string_option(*varp);
      *varp = empty_option;
    }
    if (set_sid != SID_NONE)
      set_option_scriptID_idx(idx, opt_flags,
          set_sid == 0 ? current_SID : set_sid);
  }
}

/*
 * Set global value for string option when it's a local option.
 */
static void set_string_option_global (
    int opt_idx,                /* option index */
    char_u **varp               /* pointer to option variable */
)
{
  char_u      **p, *s;

  /* the global value is always allocated */
  if (options[opt_idx].var == VAR_WIN)
    p = (char_u **)GLOBAL_WO(varp);
  else
    p = (char_u **)options[opt_idx].var;
  if (options[opt_idx].indir != PV_NONE && p != varp) {
    s = vim_strsave(*varp);
    free_string_option(*p);
    *p = s;
  }
}

/// Set a string option to a new value, handling the effects
///
/// @param[in]  opt_idx  Option to set.
/// @param[in]  value  New value.
/// @param[in]  opt_flags  Option flags: expected to contain #OPT_LOCAL and/or
///                        #OPT_GLOBAL.
/// /// @return NULL on success, error message on error. static char *set_string_option(const int opt_idx, const char *const value, const int opt_flags) FUNC_ATTR_NONNULL_ARG(2) FUNC_ATTR_WARN_UNUSED_RESULT { if (options[opt_idx].var == NULL) { // don't set hidden option return NULL; } char *const s = xstrdup(value); char **const varp = (char **)get_varp_scope( &(options[opt_idx]), ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0 ? (((int)options[opt_idx].indir & PV_BOTH) ? OPT_GLOBAL : OPT_LOCAL) : opt_flags)); char *const oldval = *varp; *varp = s; char *const saved_oldval = (starting ? NULL : xstrdup(oldval)); char *const r = (char *)did_set_string_option( opt_idx, (char_u **)varp, (int)true, (char_u *)oldval, NULL, opt_flags); if (r == NULL) { did_set_option(opt_idx, opt_flags, true); } // call autocommand after handling side effects if (saved_oldval != NULL) { char buf_type[7]; vim_snprintf(buf_type, ARRAY_SIZE(buf_type), "%s", (opt_flags & OPT_LOCAL) ? "local" : "global"); set_vim_var_string(VV_OPTION_NEW, (char *)(*varp), -1); set_vim_var_string(VV_OPTION_OLD, saved_oldval, -1); set_vim_var_string(VV_OPTION_TYPE, buf_type, -1); apply_autocmds(EVENT_OPTIONSET, (char_u *)options[opt_idx].fullname, NULL, false, NULL); reset_v_option_vars(); xfree(saved_oldval); } return r; } /// Return true if "val" is a valid 'filetype' name. /// Also used for 'syntax' and 'keymap'. static bool valid_filetype(char_u *val) { for (char_u *s = val; *s != NUL; s++) { if (!ASCII_ISALNUM(*s) && vim_strchr((char_u *)".-_", *s) == NULL) { return false; } } return true; } /* * Handle string options that need some action to perform when changed. * Returns NULL for success, or an error message for an error. 
*/ static char_u * did_set_string_option ( int opt_idx, /* index in options[] table */ char_u **varp, /* pointer to the option variable */ int new_value_alloced, /* new value was allocated */ char_u *oldval, /* previous value of the option */ char_u *errbuf, /* buffer for errors, or NULL */ int opt_flags /* OPT_LOCAL and/or OPT_GLOBAL */ ) { char_u *errmsg = NULL; char_u *s, *p; int did_chartab = FALSE; char_u **gvarp; bool free_oldval = (options[opt_idx].flags & P_ALLOCED); /* Get the global option to compare with, otherwise we would have to check * two values for all local options. */ gvarp = (char_u **)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL); /* Disallow changing some options from secure mode */ if ((secure || sandbox != 0) && (options[opt_idx].flags & P_SECURE)) { errmsg = e_secure; } /* Check for a "normal" file name in some options. Disallow a path * separator (slash and/or backslash), wildcards and characters that are * often illegal in a file name. */ else if ((options[opt_idx].flags & P_NFNAME) && vim_strpbrk(*varp, (char_u *)"/\\*?[|<>") != NULL) { errmsg = e_invarg; } /* 'backupcopy' */ else if (gvarp == &p_bkc) { char_u *bkc = p_bkc; unsigned int *flags = &bkc_flags; if (opt_flags & OPT_LOCAL) { bkc = curbuf->b_p_bkc; flags = &curbuf->b_bkc_flags; } if ((opt_flags & OPT_LOCAL) && *bkc == NUL) { // make the local value empty: use the global value *flags = 0; } else { if (opt_strings_flags(bkc, p_bkc_values, flags, true) != OK) { errmsg = e_invarg; } if (((*flags & BKC_AUTO) != 0) + ((*flags & BKC_YES) != 0) + ((*flags & BKC_NO) != 0) != 1) { // Must have exactly one of "auto", "yes" and "no". (void)opt_strings_flags(oldval, p_bkc_values, flags, true); errmsg = e_invarg; } } } /* 'backupext' and 'patchmode' */ else if (varp == &p_bex || varp == &p_pm) { if (STRCMP(*p_bex == '.' ? p_bex + 1 : p_bex, *p_pm == '.' ? 
p_pm + 1 : p_pm) == 0) errmsg = (char_u *)N_("E589: 'backupext' and 'patchmode' are equal"); } /* 'breakindentopt' */ else if (varp == &curwin->w_p_briopt) { if (briopt_check(curwin) == FAIL) errmsg = e_invarg; } else if (varp == &p_isi || varp == &(curbuf->b_p_isk) || varp == &p_isp || varp == &p_isf) { // 'isident', 'iskeyword', 'isprint or 'isfname' option: refill g_chartab[] // If the new option is invalid, use old value. 'lisp' option: refill // g_chartab[] for '-' char if (init_chartab() == FAIL) { did_chartab = TRUE; /* need to restore it below */ errmsg = e_invarg; /* error in value */ } } /* 'helpfile' */ else if (varp == &p_hf) { /* May compute new values for $VIM and $VIMRUNTIME */ if (didset_vim) { vim_setenv("VIM", ""); didset_vim = FALSE; } if (didset_vimruntime) { vim_setenv("VIMRUNTIME", ""); didset_vimruntime = FALSE; } } /* 'colorcolumn' */ else if (varp == &curwin->w_p_cc) errmsg = check_colorcolumn(curwin); /* 'helplang' */ else if (varp == &p_hlg) { /* Check for "", "ab", "ab,cd", etc. */ for (s = p_hlg; *s != NUL; s += 3) { if (s[1] == NUL || ((s[2] != ',' || s[3] == NUL) && s[2] != NUL)) { errmsg = e_invarg; break; } if (s[2] == NUL) break; } } /* 'highlight' */ else if (varp == &p_hl) { if (highlight_changed() == FAIL) errmsg = e_invarg; /* invalid flags */ } /* 'nrformats' */ else if (gvarp == &p_nf) { if (check_opt_strings(*varp, p_nf_values, TRUE) != OK) errmsg = e_invarg; } else if (varp == &p_ssop) { // 'sessionoptions' if (opt_strings_flags(p_ssop, p_ssop_values, &ssop_flags, true) != OK) errmsg = e_invarg; if ((ssop_flags & SSOP_CURDIR) && (ssop_flags & SSOP_SESDIR)) { /* Don't allow both "sesdir" and "curdir". 
*/ (void)opt_strings_flags(oldval, p_ssop_values, &ssop_flags, true); errmsg = e_invarg; } } else if (varp == &p_vop) { // 'viewoptions' if (opt_strings_flags(p_vop, p_ssop_values, &vop_flags, true) != OK) errmsg = e_invarg; } /* 'scrollopt' */ else if (varp == &p_sbo) { if (check_opt_strings(p_sbo, p_scbopt_values, TRUE) != OK) errmsg = e_invarg; } else if (varp == &p_ambw || (int *)varp == &p_emoji) { // 'ambiwidth' if (check_opt_strings(p_ambw, p_ambw_values, false) != OK) { errmsg = e_invarg; } else if (set_chars_option(&p_lcs) != NULL) { errmsg = (char_u *)_("E834: Conflicts with value of 'listchars'"); } else if (set_chars_option(&p_fcs) != NULL) { errmsg = (char_u *)_("E835: Conflicts with value of 'fillchars'"); } } /* 'background' */ else if (varp == &p_bg) { if (check_opt_strings(p_bg, p_bg_values, FALSE) == OK) { int dark = (*p_bg == 'd'); init_highlight(FALSE, FALSE); if (dark != (*p_bg == 'd') && get_var_value((char_u *)"g:colors_name") != NULL) { /* The color scheme must have set 'background' back to another * value, that's not what we want here. Disable the color * scheme and set the colors again. */ do_unlet((char_u *)"g:colors_name", TRUE); free_string_option(p_bg); p_bg = vim_strsave((char_u *)(dark ? 
"dark" : "light")); check_string_option(&p_bg); init_highlight(FALSE, FALSE); } } else errmsg = e_invarg; } /* 'wildmode' */ else if (varp == &p_wim) { if (check_opt_wim() == FAIL) errmsg = e_invarg; } /* 'wildoptions' */ else if (varp == &p_wop) { if (check_opt_strings(p_wop, p_wop_values, TRUE) != OK) errmsg = e_invarg; } /* 'winaltkeys' */ else if (varp == &p_wak) { if (*p_wak == NUL || check_opt_strings(p_wak, p_wak_values, FALSE) != OK) errmsg = e_invarg; } /* 'eventignore' */ else if (varp == &p_ei) { if (check_ei() == FAIL) errmsg = e_invarg; /* 'encoding' and 'fileencoding' */ } else if (varp == &p_enc || gvarp == &p_fenc) { if (gvarp == &p_fenc) { if (!MODIFIABLE(curbuf) && opt_flags != OPT_GLOBAL) { errmsg = e_modifiable; } else if (vim_strchr(*varp, ',') != NULL) { // No comma allowed in 'fileencoding'; catches confusing it // with 'fileencodings'. errmsg = e_invarg; } else { // May show a "+" in the title now. redraw_titles(); // Add 'fileencoding' to the swap file. ml_setflags(curbuf); } } if (errmsg == NULL) { /* canonize the value, so that STRCMP() can be used on it */ p = enc_canonize(*varp); xfree(*varp); *varp = p; if (varp == &p_enc) { // only encoding=utf-8 allowed if (STRCMP(p_enc, "utf-8") != 0) { errmsg = e_invarg; } } } } else if (varp == &p_penc) { /* Canonize printencoding if VIM standard one */ p = enc_canonize(p_penc); xfree(p_penc); p_penc = p; } else if (varp == &curbuf->b_p_keymap) { if (!valid_filetype(*varp)) { errmsg = e_invarg; } else { // load or unload key mapping tables errmsg = keymap_init(); } if (errmsg == NULL) { if (*curbuf->b_p_keymap != NUL) { /* Installed a new keymap, switch on using it. */ curbuf->b_p_iminsert = B_IMODE_LMAP; if (curbuf->b_p_imsearch != B_IMODE_USE_INSERT) curbuf->b_p_imsearch = B_IMODE_LMAP; } else { /* Cleared the keymap, may reset 'iminsert' and 'imsearch'. 
*/ if (curbuf->b_p_iminsert == B_IMODE_LMAP) curbuf->b_p_iminsert = B_IMODE_NONE; if (curbuf->b_p_imsearch == B_IMODE_LMAP) curbuf->b_p_imsearch = B_IMODE_USE_INSERT; } if ((opt_flags & OPT_LOCAL) == 0) { set_iminsert_global(); set_imsearch_global(); } status_redraw_curbuf(); } } /* 'fileformat' */ else if (gvarp == &p_ff) { if (!MODIFIABLE(curbuf) && !(opt_flags & OPT_GLOBAL)) errmsg = e_modifiable; else if (check_opt_strings(*varp, p_ff_values, FALSE) != OK) errmsg = e_invarg; else { redraw_titles(); /* update flag in swap file */ ml_setflags(curbuf); /* Redraw needed when switching to/from "mac": a CR in the text * will be displayed differently. */ if (get_fileformat(curbuf) == EOL_MAC || *oldval == 'm') redraw_curbuf_later(NOT_VALID); } } /* 'fileformats' */ else if (varp == &p_ffs) { if (check_opt_strings(p_ffs, p_ff_values, TRUE) != OK) { errmsg = e_invarg; } } /* 'matchpairs' */ else if (gvarp == &p_mps) { if (has_mbyte) { for (p = *varp; *p != NUL; ++p) { int x2 = -1; int x3 = -1; if (*p != NUL) p += mb_ptr2len(p); if (*p != NUL) x2 = *p++; if (*p != NUL) { x3 = mb_ptr2char(p); p += mb_ptr2len(p); } if (x2 != ':' || x3 == -1 || (*p != NUL && *p != ',')) { errmsg = e_invarg; break; } if (*p == NUL) break; } } else { /* Check for "x:y,x:y" */ for (p = *varp; *p != NUL; p += 4) { if (p[1] != ':' || p[2] == NUL || (p[3] != NUL && p[3] != ',')) { errmsg = e_invarg; break; } if (p[3] == NUL) break; } } } /* 'comments' */ else if (gvarp == &p_com) { for (s = *varp; *s; ) { while (*s && *s != ':') { if (vim_strchr((char_u *)COM_ALL, *s) == NULL && !ascii_isdigit(*s) && *s != '-') { errmsg = illegal_char(errbuf, *s); break; } ++s; } if (*s++ == NUL) errmsg = (char_u *)N_("E524: Missing colon"); else if (*s == ',' || *s == NUL) errmsg = (char_u *)N_("E525: Zero length string"); if (errmsg != NULL) break; while (*s && *s != ',') { if (*s == '\\' && s[1] != NUL) ++s; ++s; } s = skip_to_option_part(s); } } /* 'listchars' */ else if (varp == &p_lcs) { errmsg = 
set_chars_option(varp); } /* 'fillchars' */ else if (varp == &p_fcs) { errmsg = set_chars_option(varp); } /* 'cedit' */ else if (varp == &p_cedit) { errmsg = check_cedit(); } /* 'verbosefile' */ else if (varp == &p_vfile) { verbose_stop(); if (*p_vfile != NUL && verbose_open() == FAIL) errmsg = e_invarg; /* 'shada' */ } else if (varp == &p_shada) { // TODO(ZyX-I): Remove this code in the future, alongside with &viminfo // option. opt_idx = ((options[opt_idx].fullname[0] == 'v') ? (shada_idx == -1 ? ((shada_idx = findoption((char_u *) "shada"))) : shada_idx) : opt_idx); for (s = p_shada; *s; ) { /* Check it's a valid character */ if (vim_strchr((char_u *)"!\"%'/:<@cfhnrs", *s) == NULL) { errmsg = illegal_char(errbuf, *s); break; } if (*s == 'n') { /* name is always last one */ break; } else if (*s == 'r') { /* skip until next ',' */ while (*++s && *s != ',') ; } else if (*s == '%') { /* optional number */ while (ascii_isdigit(*++s)) ; } else if (*s == '!' || *s == 'h' || *s == 'c') ++s; /* no extra chars */ else { /* must have a number */ while (ascii_isdigit(*++s)) ; if (!ascii_isdigit(*(s - 1))) { if (errbuf != NULL) { sprintf((char *)errbuf, _("E526: Missing number after <%s>"), transchar_byte(*(s - 1))); errmsg = errbuf; } else errmsg = (char_u *)""; break; } } if (*s == ',') ++s; else if (*s) { if (errbuf != NULL) errmsg = (char_u *)N_("E527: Missing comma"); else errmsg = (char_u *)""; break; } } if (*p_shada && errmsg == NULL && get_shada_parameter('\'') < 0) errmsg = (char_u *)N_("E528: Must specify a ' value"); } /* 'showbreak' */ else if (varp == &p_sbr) { for (s = p_sbr; *s; ) { if (ptr2cells(s) != 1) errmsg = (char_u *)N_("E595: contains unprintable or wide character"); mb_ptr_adv(s); } } /* 'guicursor' */ else if (varp == &p_guicursor) errmsg = parse_shape_opt(SHAPE_CURSOR); else if (varp == &p_popt) errmsg = parse_printoptions(); else if (varp == &p_pmfn) errmsg = parse_printmbfont(); /* 'langmap' */ else if (varp == &p_langmap) langmap_set(); /* 
'breakat' */ else if (varp == &p_breakat) fill_breakat_flags(); /* 'titlestring' and 'iconstring' */ else if (varp == &p_titlestring || varp == &p_iconstring) { int flagval = (varp == &p_titlestring) ? STL_IN_TITLE : STL_IN_ICON; /* NULL => statusline syntax */ if (vim_strchr(*varp, '%') && check_stl_option(*varp) == NULL) stl_syntax |= flagval; else stl_syntax &= ~flagval; did_set_title(varp == &p_iconstring); } /* 'selection' */ else if (varp == &p_sel) { if (*p_sel == NUL || check_opt_strings(p_sel, p_sel_values, FALSE) != OK) errmsg = e_invarg; } /* 'selectmode' */ else if (varp == &p_slm) { if (check_opt_strings(p_slm, p_slm_values, TRUE) != OK) errmsg = e_invarg; } /* 'keymodel' */ else if (varp == &p_km) { if (check_opt_strings(p_km, p_km_values, TRUE) != OK) errmsg = e_invarg; else { km_stopsel = (vim_strchr(p_km, 'o') != NULL); km_startsel = (vim_strchr(p_km, 'a') != NULL); } } /* 'mousemodel' */ else if (varp == &p_mousem) { if (check_opt_strings(p_mousem, p_mousem_values, FALSE) != OK) errmsg = e_invarg; } else if (varp == &p_swb) { // 'switchbuf' if (opt_strings_flags(p_swb, p_swb_values, &swb_flags, true) != OK) errmsg = e_invarg; } /* 'debug' */ else if (varp == &p_debug) { if (check_opt_strings(p_debug, p_debug_values, TRUE) != OK) errmsg = e_invarg; } else if (varp == &p_dy) { // 'display' if (opt_strings_flags(p_dy, p_dy_values, &dy_flags, true) != OK) errmsg = e_invarg; else (void)init_chartab(); } /* 'eadirection' */ else if (varp == &p_ead) { if (check_opt_strings(p_ead, p_ead_values, FALSE) != OK) errmsg = e_invarg; } else if (varp == &p_cb) { // 'clipboard' if (opt_strings_flags(p_cb, p_cb_values, &cb_flags, true) != OK) { errmsg = e_invarg; } } else if (varp == &(curwin->w_s->b_p_spl) // 'spell' || varp == &(curwin->w_s->b_p_spf)) { // When 'spelllang' or 'spellfile' is set and there is a window for this // buffer in which 'spell' is set load the wordlists. 
errmsg = did_set_spell_option(varp == &(curwin->w_s->b_p_spf)); } /* When 'spellcapcheck' is set compile the regexp program. */ else if (varp == &(curwin->w_s->b_p_spc)) { errmsg = compile_cap_prog(curwin->w_s); } /* 'spellsuggest' */ else if (varp == &p_sps) { if (spell_check_sps() != OK) errmsg = e_invarg; } /* 'mkspellmem' */ else if (varp == &p_msm) { if (spell_check_msm() != OK) errmsg = e_invarg; } /* When 'bufhidden' is set, check for valid value. */ else if (gvarp == &p_bh) { if (check_opt_strings(curbuf->b_p_bh, p_bufhidden_values, FALSE) != OK) errmsg = e_invarg; } /* When 'buftype' is set, check for valid value. */ else if (gvarp == &p_bt) { if ((curbuf->terminal && curbuf->b_p_bt[0] != 't') || (!curbuf->terminal && curbuf->b_p_bt[0] == 't') || check_opt_strings(curbuf->b_p_bt, p_buftype_values, FALSE) != OK) { errmsg = e_invarg; } else { if (curwin->w_status_height) { curwin->w_redr_status = TRUE; redraw_later(VALID); } curbuf->b_help = (curbuf->b_p_bt[0] == 'h'); redraw_titles(); } } /* 'statusline' or 'rulerformat' */ else if (gvarp == &p_stl || varp == &p_ruf) { int wid; if (varp == &p_ruf) /* reset ru_wid first */ ru_wid = 0; s = *varp; if (varp == &p_ruf && *s == '%') { /* set ru_wid if 'ruf' starts with "%99(" */ if (*++s == '-') /* ignore a '-' */ s++; wid = getdigits_int(&s); if (wid && *s == '(' && (errmsg = check_stl_option(p_ruf)) == NULL) ru_wid = wid; else errmsg = check_stl_option(p_ruf); } /* check 'statusline' only if it doesn't start with "%!" 
*/ else if (varp == &p_ruf || s[0] != '%' || s[1] != '!') errmsg = check_stl_option(s); if (varp == &p_ruf && errmsg == NULL) comp_col(); } /* check if it is a valid value for 'complete' -- Acevedo */ else if (gvarp == &p_cpt) { for (s = *varp; *s; ) { while (*s == ',' || *s == ' ') s++; if (!*s) break; if (vim_strchr((char_u *)".wbuksid]tU", *s) == NULL) { errmsg = illegal_char(errbuf, *s); break; } if (*++s != NUL && *s != ',' && *s != ' ') { if (s[-1] == 'k' || s[-1] == 's') { /* skip optional filename after 'k' and 's' */ while (*s && *s != ',' && *s != ' ') { if (*s == '\\') ++s; ++s; } } else { if (errbuf != NULL) { sprintf((char *)errbuf, _("E535: Illegal character after <%c>"), *--s); errmsg = errbuf; } else errmsg = (char_u *)""; break; } } } } /* 'completeopt' */ else if (varp == &p_cot) { if (check_opt_strings(p_cot, p_cot_values, true) != OK) { errmsg = e_invarg; } else { completeopt_was_set(); } } /* 'pastetoggle': translate key codes like in a mapping */ else if (varp == &p_pt) { if (*p_pt) { (void)replace_termcodes(p_pt, STRLEN(p_pt), &p, true, true, false, CPO_TO_CPO_FLAGS); if (p != NULL) { if (new_value_alloced) free_string_option(p_pt); p_pt = p; new_value_alloced = TRUE; } } } /* 'backspace' */ else if (varp == &p_bs) { if (ascii_isdigit(*p_bs)) { if (*p_bs >'2' || p_bs[1] != NUL) errmsg = e_invarg; } else if (check_opt_strings(p_bs, p_bs_values, TRUE) != OK) errmsg = e_invarg; } else if (varp == &p_bo) { if (opt_strings_flags(p_bo, p_bo_values, &bo_flags, true) != OK) { errmsg = e_invarg; } } else if (gvarp == &p_tc) { // 'tagcase' unsigned int *flags; if (opt_flags & OPT_LOCAL) { p = curbuf->b_p_tc; flags = &curbuf->b_tc_flags; } else { p = p_tc; flags = &tc_flags; } if ((opt_flags & OPT_LOCAL) && *p == NUL) { // make the local value empty: use the global value *flags = 0; } else if (*p == NUL || opt_strings_flags(p, p_tc_values, flags, false) != OK) { errmsg = e_invarg; } } else if (varp == &p_cmp) { // 'casemap' if (opt_strings_flags(p_cmp, 
p_cmp_values, &cmp_flags, true) != OK) errmsg = e_invarg; } /* 'diffopt' */ else if (varp == &p_dip) { if (diffopt_changed() == FAIL) errmsg = e_invarg; } /* 'foldmethod' */ else if (gvarp == &curwin->w_allbuf_opt.wo_fdm) { if (check_opt_strings(*varp, p_fdm_values, FALSE) != OK || *curwin->w_p_fdm == NUL) errmsg = e_invarg; else { foldUpdateAll(curwin); if (foldmethodIsDiff(curwin)) newFoldLevel(); } } /* 'foldexpr' */ else if (varp == &curwin->w_p_fde) { if (foldmethodIsExpr(curwin)) foldUpdateAll(curwin); } /* 'foldmarker' */ else if (gvarp == &curwin->w_allbuf_opt.wo_fmr) { p = vim_strchr(*varp, ','); if (p == NULL) errmsg = (char_u *)N_("E536: comma required"); else if (p == *varp || p[1] == NUL) errmsg = e_invarg; else if (foldmethodIsMarker(curwin)) foldUpdateAll(curwin); } /* 'commentstring' */ else if (gvarp == &p_cms) { if (**varp != NUL && strstr((char *)*varp, "%s") == NULL) errmsg = (char_u *)N_( "E537: 'commentstring' must be empty or contain %s"); } else if (varp == &p_fdo) { // 'foldopen' if (opt_strings_flags(p_fdo, p_fdo_values, &fdo_flags, true) != OK) errmsg = e_invarg; } /* 'foldclose' */ else if (varp == &p_fcl) { if (check_opt_strings(p_fcl, p_fcl_values, TRUE) != OK) errmsg = e_invarg; } /* 'foldignore' */ else if (gvarp == &curwin->w_allbuf_opt.wo_fdi) { if (foldmethodIsIndent(curwin)) foldUpdateAll(curwin); } else if (varp == &p_ve) { // 'virtualedit' if (opt_strings_flags(p_ve, p_ve_values, &ve_flags, true) != OK) errmsg = e_invarg; else if (STRCMP(p_ve, oldval) != 0) { /* Recompute cursor position in case the new 've' setting * changes something. 
*/ validate_virtcol(); coladvance(curwin->w_virtcol); } } else if (varp == &p_csqf) { if (p_csqf != NULL) { p = p_csqf; while (*p != NUL) { if (vim_strchr((char_u *)CSQF_CMDS, *p) == NULL || p[1] == NUL || vim_strchr((char_u *)CSQF_FLAGS, p[1]) == NULL || (p[2] != NUL && p[2] != ',')) { errmsg = e_invarg; break; } else if (p[2] == NUL) break; else p += 3; } } } /* 'cinoptions' */ else if (gvarp == &p_cino) { /* TODO: recognize errors */ parse_cino(curbuf); // inccommand } else if (varp == &p_icm) { if (check_opt_strings(p_icm, p_icm_values, false) != OK) { errmsg = e_invarg; } } else if (gvarp == &p_ft) { if (!valid_filetype(*varp)) { errmsg = e_invarg; } } else if (gvarp == &p_syn) { if (!valid_filetype(*varp)) { errmsg = e_invarg; } } else { // Options that are a list of flags. p = NULL; if (varp == &p_ww) p = (char_u *)WW_ALL; if (varp == &p_shm) p = (char_u *)SHM_ALL; else if (varp == &(p_cpo)) p = (char_u *)CPO_VI; else if (varp == &(curbuf->b_p_fo)) p = (char_u *)FO_ALL; else if (varp == &curwin->w_p_cocu) p = (char_u *)COCU_ALL; else if (varp == &p_mouse) { p = (char_u *)MOUSE_ALL; } if (p != NULL) { for (s = *varp; *s; ++s) if (vim_strchr(p, *s) == NULL) { errmsg = illegal_char(errbuf, *s); break; } } } /* * If error detected, restore the previous value. */ if (errmsg != NULL) { if (new_value_alloced) free_string_option(*varp); *varp = oldval; /* * When resetting some values, need to act on it. */ if (did_chartab) (void)init_chartab(); if (varp == &p_hl) (void)highlight_changed(); } else { /* Remember where the option was set. */ set_option_scriptID_idx(opt_idx, opt_flags, current_SID); /* * Free string options that are in allocated memory. * Use "free_oldval", because recursiveness may change the flags under * our fingers (esp. init_highlight()). 
*/ if (free_oldval) free_string_option(oldval); if (new_value_alloced) options[opt_idx].flags |= P_ALLOCED; else options[opt_idx].flags &= ~P_ALLOCED; if ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0 && ((int)options[opt_idx].indir & PV_BOTH)) { /* global option with local value set to use global value; free * the local value and make it empty */ p = get_varp_scope(&(options[opt_idx]), OPT_LOCAL); free_string_option(*(char_u **)p); *(char_u **)p = empty_option; } /* May set global value for local option. */ else if (!(opt_flags & OPT_LOCAL) && opt_flags != OPT_GLOBAL) set_string_option_global(opt_idx, varp); /* * Trigger the autocommand only after setting the flags. */ /* When 'syntax' is set, load the syntax of that name */ if (varp == &(curbuf->b_p_syn)) { apply_autocmds(EVENT_SYNTAX, curbuf->b_p_syn, curbuf->b_fname, TRUE, curbuf); } else if (varp == &(curbuf->b_p_ft)) { /* 'filetype' is set, trigger the FileType autocommand */ did_filetype = TRUE; apply_autocmds(EVENT_FILETYPE, curbuf->b_p_ft, curbuf->b_fname, TRUE, curbuf); } if (varp == &(curwin->w_s->b_p_spl)) { char_u fname[200]; char_u *q = curwin->w_s->b_p_spl; /* Skip the first name if it is "cjk". */ if (STRNCMP(q, "cjk,", 4) == 0) q += 4; /* * Source the spell/LANG.vim in 'runtimepath'. * They could set 'spellcapcheck' depending on the language. * Use the first name in 'spelllang' up to '_region' or * '.encoding'. 
       */
      for (p = q; *p != NUL; ++p)
        if (vim_strchr((char_u *)"_.,", *p) != NULL)
          break;
      vim_snprintf((char *)fname, sizeof(fname), "spell/%.*s.vim",
          (int)(p - q), q);
      source_runtime(fname, DIP_ALL);
    }
  }

  if (varp == &p_mouse) {
    if (*p_mouse == NUL) {
      ui_mouse_off();
    } else {
      setmouse();  // in case 'mouse' changed
    }
  }

  /* Changing an option that influences the cursor column invalidates the
   * remembered "wanted" column. */
  if (curwin->w_curswant != MAXCOL
      && (options[opt_idx].flags & (P_CURSWANT | P_RALL)) != 0)
    curwin->w_set_curswant = TRUE;

  check_redraw(options[opt_idx].flags);

  return errmsg;
}

/*
 * Simple int comparison function for use with qsort()
 * NOTE(review): subtraction can overflow for values near INT_MIN/INT_MAX;
 * harmless for the small column numbers sorted here, but do not reuse this
 * comparator for arbitrary ints -- TODO confirm.
 */
static int int_cmp(const void *a, const void *b)
{
  return *(const int *)a - *(const int *)b;
}

/*
 * Handle setting 'colorcolumn' or 'textwidth' in window "wp".
 * Parses "wp->w_p_cc" and rebuilds wp->w_p_cc_cols, a -1 terminated,
 * sorted, de-duplicated array of 0-based screen columns.
 * Returns error message, NULL if it's OK.
 */
char_u *check_colorcolumn(win_T *wp)
{
  char_u      *s;
  int col;
  unsigned int count = 0;
  int color_cols[256];          /* count is capped at 255 below */
  int j = 0;

  if (wp->w_buffer == NULL)
    return NULL;      /* buffer was closed */

  for (s = wp->w_p_cc; *s != NUL && count < 255; ) {
    if (*s == '-' || *s == '+') {
      /* -N and +N: add to 'textwidth' */
      col = (*s == '-') ? -1 : 1;
      ++s;
      if (!ascii_isdigit(*s))
        return e_invarg;
      col = col * getdigits_int(&s);
      if (wp->w_buffer->b_p_tw == 0)
        goto skip;          /* 'textwidth' not set, skip this item */
      assert((col >= 0 && wp->w_buffer->b_p_tw <= INT_MAX - col
              && wp->w_buffer->b_p_tw + col >= INT_MIN)
             || (col < 0 && wp->w_buffer->b_p_tw >= INT_MIN - col
                 && wp->w_buffer->b_p_tw + col <= INT_MAX));
      col += (int)wp->w_buffer->b_p_tw;
      if (col < 0)
        goto skip;
    } else if (ascii_isdigit(*s))
      col = getdigits_int(&s);
    else
      return e_invarg;
    color_cols[count++] = col - 1;      /* 1-based to 0-based */
skip:
    if (*s == NUL)
      break;
    if (*s != ',')
      return e_invarg;
    if (*++s == NUL)
      return e_invarg;        /* illegal trailing comma as in "set cc=80," */
  }

  xfree(wp->w_p_cc_cols);
  if (count == 0)
    wp->w_p_cc_cols = NULL;
  else {
    wp->w_p_cc_cols = xmalloc(sizeof(int) * (count + 1));
    /* sort the columns for faster usage on screen redraw inside
     * win_line() */
    qsort(color_cols, count, sizeof(int), int_cmp);

    for (unsigned int i = 0; i < count; ++i)
      /* skip duplicates */
      if (j == 0 || wp->w_p_cc_cols[j - 1] != color_cols[i])
        wp->w_p_cc_cols[j++] = color_cols[i];
    wp->w_p_cc_cols[j] = -1;        /* end marker */
  }

  return NULL;    /* no error */
}

/*
 * Handle setting 'listchars' or 'fillchars'.
 * Returns error message, NULL if it's OK.
 */
static char_u *set_chars_option(char_u **varp)
{
  int round, i, len, entries;
  char_u      *p, *s;
  int c1, c2 = 0;
  /* Maps an item name in the option string to the int that stores the
   * chosen character. */
  struct charstab {
    int     *cp;
    char    *name;
  };
  static struct charstab filltab[] =
  {
    {&fill_stl,     "stl"},
    {&fill_stlnc,   "stlnc"},
    {&fill_vert,    "vert"},
    {&fill_fold,    "fold"},
    {&fill_diff,    "diff"},
  };
  static struct charstab lcstab[] =
  {
    {&lcs_eol,      "eol"},
    {&lcs_ext,      "extends"},
    {&lcs_nbsp,     "nbsp"},
    {&lcs_prec,     "precedes"},
    {&lcs_space,    "space"},
    {&lcs_tab2,     "tab"},
    {&lcs_trail,    "trail"},
    {&lcs_conceal,  "conceal"},
  };
  struct charstab *tab;

  /* Select the table from the option being set. */
  if (varp == &p_lcs) {
    tab = lcstab;
    entries = ARRAY_SIZE(lcstab);
  } else {
    tab = filltab;
    entries = ARRAY_SIZE(filltab);
  }

  /* first round: check for valid value, second round: assign values */
  for (round = 0; round <= 1; ++round) {
    if (round > 0) {
      /* After checking that the value is valid: set defaults: space for
       * 'fillchars', NUL for 'listchars' */
      for (i = 0; i < entries; ++i)
        if (tab[i].cp != NULL)
          *(tab[i].cp) = (varp == &p_lcs ? NUL : ' ');
      if (varp == &p_lcs)
        lcs_tab1 = NUL;
      else
        fill_diff = '-';
    }
    p = *varp;
    while (*p) {
      for (i = 0; i < entries; ++i) {
        len = (int)STRLEN(tab[i].name);
        if (STRNCMP(p, tab[i].name, len) == 0
            && p[len] == ':'
            && p[len + 1] != NUL) {
          s = p + len + 1;
          c1 = mb_ptr2char_adv(&s);
          /* Only single-cell characters are allowed. */
          if (mb_char2cells(c1) > 1)
            continue;
          if (tab[i].cp == &lcs_tab2) {
            /* "tab:" takes two characters. */
            if (*s == NUL)
              continue;
            c2 = mb_ptr2char_adv(&s);
            if (mb_char2cells(c2) > 1)
              continue;
          }
          if (*s == ',' || *s == NUL) {
            if (round) {
              if (tab[i].cp == &lcs_tab2) {
                lcs_tab1 = c1;
                lcs_tab2 = c2;
              } else if (tab[i].cp != NULL)
                *(tab[i].cp) = c1;
            }
            p = s;
            break;
          }
        }
      }

      if (i == entries)
        return e_invarg;          /* no table entry matched this item */
      if (*p == ',')
        ++p;
    }
  }

  return NULL;          /* no error */
}

/*
 * Check validity of options with the 'statusline' format.
 * Return error message or NULL.
 */
char_u *check_stl_option(char_u *s)
{
  int itemcnt = 0;
  int groupdepth = 0;
  /* NOTE(review): static buffer, shared across calls -- fine for the
   * single-threaded main loop, not reentrant. */
  static char_u errbuf[80];

  while (*s && itemcnt < STL_MAX_ITEM) {
    /* Check for valid keys after % sequences */
    while (*s && *s != '%')
      s++;
    if (!*s)
      break;
    s++;
    if (*s != '%' && *s != ')') {
      itemcnt++;
    }
    if (*s == '%' || *s == STL_TRUNCMARK || *s == STL_SEPARATE) {
      s++;
      continue;
    }
    if (*s == ')') {
      s++;
      if (--groupdepth < 0)
        break;
      continue;
    }
    if (*s == '-')
      s++;
    while (ascii_isdigit(*s))
      s++;
    if (*s == STL_USER_HL)
      continue;
    if (*s == '.') {
      s++;
      while (*s && ascii_isdigit(*s))
        s++;
    }
    if (*s == '(') {
      groupdepth++;
      continue;
    }
    if (vim_strchr(STL_ALL, *s) == NULL) {
      return illegal_char(errbuf, *s);
    }
    if (*s == '{') {
      /* %{expr} item: scan for the closing brace. */
      s++;
      while (*s != '}' && *s)
        s++;
      if (*s != '}')
        return (char_u *)N_("E540: Unclosed expression sequence");
    }
  }
  if (itemcnt >= STL_MAX_ITEM)
    return (char_u *)N_("E541: too many items");
  if (groupdepth != 0)
    return (char_u *)N_("E542: unbalanced groups");
  return NULL;
}

/// Validate 'spellfile' (when "is_spellfile") and reload word lists for any
/// window on the current buffer that has 'spell' set.
/// A non-empty 'spellfile' must end in ".add".
/// Returns NULL for success, or an error message.
static char_u *did_set_spell_option(bool is_spellfile)
{
  char_u *errmsg = NULL;

  if (is_spellfile) {
    int l = (int)STRLEN(curwin->w_s->b_p_spf);
    if (l > 0
        && (l < 4 || STRCMP(curwin->w_s->b_p_spf + l - 4, ".add") != 0)) {
      errmsg = e_invarg;
    }
  }

  if (errmsg == NULL) {
    FOR_ALL_WINDOWS_IN_TAB(wp, curtab) {
      if (wp->w_buffer == curbuf && wp->w_p_spell) {
        errmsg = did_set_spelllang(wp);
        break;
      }
    }
  }
  return errmsg;
}

/*
 * Set curbuf->b_cap_prog to the regexp program for 'spellcapcheck'.
 * Return error message when failed, NULL when OK.
 */
static char_u *compile_cap_prog(synblock_T *synblock)
{
  regprog_T   *rp = synblock->b_cap_prog;   /* keep old program until the
                                             * new one compiles */
  char_u      *re;

  if (*synblock->b_p_spc == NUL)
    synblock->b_cap_prog = NULL;
  else {
    /* Prepend a ^ so that we only match at one column */
    re = concat_str((char_u *)"^", synblock->b_p_spc);
    synblock->b_cap_prog = vim_regcomp(re, RE_MAGIC);
    xfree(re);
    if (synblock->b_cap_prog == NULL) {
      synblock->b_cap_prog = rp;         /* restore the previous program */
      return e_invarg;
    }
  }
  vim_regfree(rp);
  return NULL;
}

/*
 * Set the scriptID for an option, taking care of setting the buffer- or
 * window-local value.
 */
static void set_option_scriptID_idx(int opt_idx, int opt_flags, int id)
{
  int both = (opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0;
  int indir = (int)options[opt_idx].indir;

  /* Remember where the option was set.  For local options need to do that
   * in the buffer or window structure. */
  if (both || (opt_flags & OPT_GLOBAL) || (indir & (PV_BUF|PV_WIN)) == 0)
    options[opt_idx].scriptID = id;
  if (both || (opt_flags & OPT_LOCAL)) {
    if (indir & PV_BUF)
      curbuf->b_p_scriptID[indir & PV_MASK] = id;
    else if (indir & PV_WIN)
      curwin->w_p_scriptID[indir & PV_MASK] = id;
  }
}

/*
 * Set the value of a boolean option, and take care of side effects.
 * Returns NULL for success, or an error message for an error.
 */
static char_u *
set_bool_option (
    int opt_idx,                    /* index in options[] table */
    char_u *varp,                   /* pointer to the option variable */
    int value,                      /* new value */
    int opt_flags                   /* OPT_LOCAL and/or OPT_GLOBAL */
)
{
  int old_value = *(int *)varp;

  /* Disallow changing some options from secure mode */
  if ((secure || sandbox != 0)
      && (options[opt_idx].flags & P_SECURE)) {
    return e_secure;
  }

  *(int *)varp = value;            /* set the new value */
  /* Remember where the option was set. */
  set_option_scriptID_idx(opt_idx, opt_flags, current_SID);


  /* May set global value for local option.
*/ if ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0) *(int *)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL) = value; // Ensure that options set to p_force_on cannot be disabled. if ((int *)varp == &p_force_on && p_force_on == FALSE) { p_force_on = TRUE; return e_unsupportedoption; } // Ensure that options set to p_force_off cannot be enabled. else if ((int *)varp == &p_force_off && p_force_off == TRUE) { p_force_off = FALSE; return e_unsupportedoption; } /* 'undofile' */ else if ((int *)varp == &curbuf->b_p_udf || (int *)varp == &p_udf) { /* Only take action when the option was set. When reset we do not * delete the undo file, the option may be set again without making * any changes in between. */ if (curbuf->b_p_udf || p_udf) { char_u hash[UNDO_HASH_SIZE]; buf_T *save_curbuf = curbuf; for (curbuf = firstbuf; curbuf != NULL; curbuf = curbuf->b_next) { /* When 'undofile' is set globally: for every buffer, otherwise * only for the current buffer: Try to read in the undofile, * if one exists, the buffer wasn't changed and the buffer was * loaded */ if ((curbuf == save_curbuf || (opt_flags & OPT_GLOBAL) || opt_flags == 0) && !curbufIsChanged() && curbuf->b_ml.ml_mfp != NULL) { u_compute_hash(hash); u_read_undo(NULL, hash, curbuf->b_fname); } } curbuf = save_curbuf; } } else if ((int *)varp == &curbuf->b_p_ro) { /* when 'readonly' is reset globally, also reset readonlymode */ if (!curbuf->b_p_ro && (opt_flags & OPT_LOCAL) == 0) readonlymode = FALSE; /* when 'readonly' is set may give W10 again */ if (curbuf->b_p_ro) curbuf->b_did_warn = false; redraw_titles(); } /* when 'modifiable' is changed, redraw the window title */ else if ((int *)varp == &curbuf->b_p_ma) { redraw_titles(); } /* when 'endofline' is changed, redraw the window title */ else if ((int *)varp == &curbuf->b_p_eol) { redraw_titles(); } else if ((int *)varp == &curbuf->b_p_fixeol) { // when 'fixeol' is changed, redraw the window title redraw_titles(); } /* when 'bomb' is changed, redraw the window title 
and tab page text */ else if ((int *)varp == &curbuf->b_p_bomb) { redraw_titles(); } /* when 'bin' is set also set some other options */ else if ((int *)varp == &curbuf->b_p_bin) { set_options_bin(old_value, curbuf->b_p_bin, opt_flags); redraw_titles(); } /* when 'buflisted' changes, trigger autocommands */ else if ((int *)varp == &curbuf->b_p_bl && old_value != curbuf->b_p_bl) { apply_autocmds(curbuf->b_p_bl ? EVENT_BUFADD : EVENT_BUFDELETE, NULL, NULL, TRUE, curbuf); } /* when 'swf' is set, create swapfile, when reset remove swapfile */ else if ((int *)varp == (int *)&curbuf->b_p_swf) { if (curbuf->b_p_swf && p_uc) ml_open_file(curbuf); /* create the swap file */ else /* no need to reset curbuf->b_may_swap, ml_open_file() will check * buf->b_p_swf */ mf_close_file(curbuf, true); /* remove the swap file */ } /* when 'terse' is set change 'shortmess' */ else if ((int *)varp == &p_terse) { char_u *p; p = vim_strchr(p_shm, SHM_SEARCH); /* insert 's' in p_shm */ if (p_terse && p == NULL) { STRCPY(IObuff, p_shm); STRCAT(IObuff, "s"); set_string_option_direct((char_u *)"shm", -1, IObuff, OPT_FREE, 0); } /* remove 's' from p_shm */ else if (!p_terse && p != NULL) STRMOVE(p, p + 1); } /* when 'paste' is set or reset also change other options */ else if ((int *)varp == &p_paste) { paste_option_changed(); } /* when 'insertmode' is set from an autocommand need to do work here */ else if ((int *)varp == &p_im) { if (p_im) { if ((State & INSERT) == 0) { need_start_insertmode = true; } stop_insert_mode = false; } else if (old_value) { // only reset if it was set previously need_start_insertmode = false; stop_insert_mode = true; if (restart_edit != 0 && mode_displayed) { clear_cmdline = true; // remove "(insert)" } restart_edit = 0; } } /* when 'ignorecase' is set or reset and 'hlsearch' is set, redraw */ else if ((int *)varp == &p_ic && p_hls) { redraw_all_later(SOME_VALID); } /* when 'hlsearch' is set or reset: reset no_hlsearch */ else if ((int *)varp == &p_hls) { 
SET_NO_HLSEARCH(FALSE); } /* when 'scrollbind' is set: snapshot the current position to avoid a jump * at the end of normal_cmd() */ else if ((int *)varp == &curwin->w_p_scb) { if (curwin->w_p_scb) { do_check_scrollbind(FALSE); curwin->w_scbind_pos = curwin->w_topline; } } /* There can be only one window with 'previewwindow' set. */ else if ((int *)varp == &curwin->w_p_pvw) { if (curwin->w_p_pvw) { FOR_ALL_WINDOWS_IN_TAB(win, curtab) { if (win->w_p_pvw && win != curwin) { curwin->w_p_pvw = FALSE; return (char_u *)N_("E590: A preview window already exists"); } } } } else if (varp == (char_u *)&(curbuf->b_p_lisp)) { // When 'lisp' option changes include/exclude '-' in // keyword characters. (void)buf_init_chartab(curbuf, false); // ignore errors } else if ((int *)varp == &p_title) { // when 'title' changed, may need to change the title; same for 'icon' did_set_title(false); } else if ((int *)varp == &p_icon) { did_set_title(true); } else if ((int *)varp == &curbuf->b_changed) { if (!value) { save_file_ff(curbuf); // Buffer is unchanged } redraw_titles(); modified_was_set = value; } #ifdef BACKSLASH_IN_FILENAME else if ((int *)varp == &p_ssl) { if (p_ssl) { psepc = '/'; psepcN = '\\'; pseps[0] = '/'; } else { psepc = '\\'; psepcN = '/'; pseps[0] = '\\'; } /* need to adjust the file name arguments and buffer names. */ buflist_slash_adjust(); alist_slash_adjust(); scriptnames_slash_adjust(); } #endif /* If 'wrap' is set, set w_leftcol to zero. */ else if ((int *)varp == &curwin->w_p_wrap) { if (curwin->w_p_wrap) curwin->w_leftcol = 0; } else if ((int *)varp == &p_ea) { if (p_ea && !old_value) { win_equal(curwin, false, 0); } } else if ((int *)varp == &p_acd) { // Change directories when the 'acd' option is set now. do_autochdir(); } /* 'diff' */ else if ((int *)varp == &curwin->w_p_diff) { /* May add or remove the buffer from the list of diff buffers. 
*/ diff_buf_adjust(curwin); if (foldmethodIsDiff(curwin)) foldUpdateAll(curwin); } /* 'spell' */ else if ((int *)varp == &curwin->w_p_spell) { if (curwin->w_p_spell) { char_u *errmsg = did_set_spelllang(curwin); if (errmsg != NULL) EMSG(_(errmsg)); } } else if ((int *)varp == &p_altkeymap) { if (old_value != p_altkeymap) { if (!p_altkeymap) { p_hkmap = p_fkmap; p_fkmap = 0; } else { p_fkmap = p_hkmap; p_hkmap = 0; } (void)init_chartab(); } } /* * In case some second language keymapping options have changed, check * and correct the setting in a consistent way. */ /* * If hkmap or fkmap are set, reset Arabic keymapping. */ if ((p_hkmap || p_fkmap) && p_altkeymap) { p_altkeymap = p_fkmap; curwin->w_p_arab = FALSE; (void)init_chartab(); } /* * If hkmap set, reset Farsi keymapping. */ if (p_hkmap && p_altkeymap) { p_altkeymap = 0; p_fkmap = 0; curwin->w_p_arab = FALSE; (void)init_chartab(); } /* * If fkmap set, reset Hebrew keymapping. */ if (p_fkmap && !p_altkeymap) { p_altkeymap = 1; p_hkmap = 0; curwin->w_p_arab = FALSE; (void)init_chartab(); } if ((int *)varp == &curwin->w_p_arab) { if (curwin->w_p_arab) { /* * 'arabic' is set, handle various sub-settings. */ if (!p_tbidi) { /* set rightleft mode */ if (!curwin->w_p_rl) { curwin->w_p_rl = TRUE; changed_window_setting(); } /* Enable Arabic shaping (major part of what Arabic requires) */ if (!p_arshape) { p_arshape = TRUE; redraw_later_clear(); } } /* Arabic requires a utf-8 encoding, inform the user if its not * set. 
*/ if (STRCMP(p_enc, "utf-8") != 0) { static char *w_arabic = N_( "W17: Arabic requires UTF-8, do ':set encoding=utf-8'"); msg_source(hl_attr(HLF_W)); MSG_ATTR(_(w_arabic), hl_attr(HLF_W)); set_vim_var_string(VV_WARNINGMSG, _(w_arabic), -1); } /* set 'delcombine' */ p_deco = TRUE; /* Force-set the necessary keymap for arabic */ set_option_value((char_u *)"keymap", 0L, (char_u *)"arabic", OPT_LOCAL); p_altkeymap = 0; p_hkmap = 0; p_fkmap = 0; (void)init_chartab(); } else { /* * 'arabic' is reset, handle various sub-settings. */ if (!p_tbidi) { /* reset rightleft mode */ if (curwin->w_p_rl) { curwin->w_p_rl = FALSE; changed_window_setting(); } /* 'arabicshape' isn't reset, it is a global option and * another window may still need it "on". */ } /* 'delcombine' isn't reset, it is a global option and another * window may still want it "on". */ /* Revert to the default keymap */ curbuf->b_p_iminsert = B_IMODE_NONE; curbuf->b_p_imsearch = B_IMODE_USE_INSERT; } } /* * End of handling side effects for bool options. */ // after handling side effects, call autocommand options[opt_idx].flags |= P_WAS_SET; if (!starting) { char buf_old[2]; char buf_new[2]; char buf_type[7]; vim_snprintf(buf_old, ARRAY_SIZE(buf_old), "%d", old_value ? true: false); vim_snprintf(buf_new, ARRAY_SIZE(buf_new), "%d", value ? true: false); vim_snprintf(buf_type, ARRAY_SIZE(buf_type), "%s", (opt_flags & OPT_LOCAL) ? "local" : "global"); set_vim_var_string(VV_OPTION_NEW, buf_new, -1); set_vim_var_string(VV_OPTION_OLD, buf_old, -1); set_vim_var_string(VV_OPTION_TYPE, buf_type, -1); apply_autocmds(EVENT_OPTIONSET, (char_u *) options[opt_idx].fullname, NULL, false, NULL); reset_v_option_vars(); } comp_col(); /* in case 'ruler' or 'showcmd' changed */ if (curwin->w_curswant != MAXCOL && (options[opt_idx].flags & (P_CURSWANT | P_RALL)) != 0) curwin->w_set_curswant = TRUE; check_redraw(options[opt_idx].flags); return NULL; } /* * Set the value of a number option, and take care of side effects. 
* Returns NULL for success, or an error message for an error. */ static char_u * set_num_option ( int opt_idx, /* index in options[] table */ char_u *varp, /* pointer to the option variable */ long value, /* new value */ char_u *errbuf, /* buffer for error messages */ size_t errbuflen, /* length of "errbuf" */ int opt_flags /* OPT_LOCAL, OPT_GLOBAL and OPT_MODELINE */ ) { char_u *errmsg = NULL; long old_value = *(long *)varp; long old_Rows = Rows; /* remember old Rows */ long old_Columns = Columns; /* remember old Columns */ long *pp = (long *)varp; /* Disallow changing some options from secure mode. */ if ((secure || sandbox != 0) && (options[opt_idx].flags & P_SECURE)) { return e_secure; } *pp = value; /* Remember where the option was set. */ set_option_scriptID_idx(opt_idx, opt_flags, current_SID); if (curbuf->b_p_sw < 0) { errmsg = e_positive; curbuf->b_p_sw = curbuf->b_p_ts; } /* * Number options that need some action when changed */ if (pp == &p_wh || pp == &p_hh) { if (p_wh < 1) { errmsg = e_positive; p_wh = 1; } if (p_wmh > p_wh) { errmsg = e_winheight; p_wh = p_wmh; } if (p_hh < 0) { errmsg = e_positive; p_hh = 0; } /* Change window height NOW */ if (lastwin != firstwin) { if (pp == &p_wh && curwin->w_height < p_wh) win_setheight((int)p_wh); if (pp == &p_hh && curbuf->b_help && curwin->w_height < p_hh) win_setheight((int)p_hh); } } /* 'winminheight' */ else if (pp == &p_wmh) { if (p_wmh < 0) { errmsg = e_positive; p_wmh = 0; } if (p_wmh > p_wh) { errmsg = e_winheight; p_wmh = p_wh; } win_setminheight(); } else if (pp == &p_wiw) { if (p_wiw < 1) { errmsg = e_positive; p_wiw = 1; } if (p_wmw > p_wiw) { errmsg = e_winwidth; p_wiw = p_wmw; } /* Change window width NOW */ if (lastwin != firstwin && curwin->w_width < p_wiw) win_setwidth((int)p_wiw); } /* 'winminwidth' */ else if (pp == &p_wmw) { if (p_wmw < 0) { errmsg = e_positive; p_wmw = 0; } if (p_wmw > p_wiw) { errmsg = e_winwidth; p_wmw = p_wiw; } win_setminheight(); } else if (pp == &p_ls) { /* (re)set 
last window status line */ last_status(false); } /* (re)set tab page line */ else if (pp == &p_stal) { shell_new_rows(); /* recompute window positions and heights */ } /* 'foldlevel' */ else if (pp == &curwin->w_p_fdl) { if (curwin->w_p_fdl < 0) curwin->w_p_fdl = 0; newFoldLevel(); } /* 'foldminlines' */ else if (pp == &curwin->w_p_fml) { foldUpdateAll(curwin); } /* 'foldnestmax' */ else if (pp == &curwin->w_p_fdn) { if (foldmethodIsSyntax(curwin) || foldmethodIsIndent(curwin)) foldUpdateAll(curwin); } /* 'foldcolumn' */ else if (pp == &curwin->w_p_fdc) { if (curwin->w_p_fdc < 0) { errmsg = e_positive; curwin->w_p_fdc = 0; } else if (curwin->w_p_fdc > 12) { errmsg = e_invarg; curwin->w_p_fdc = 12; } // 'shiftwidth' or 'tabstop' } else if (pp == &curbuf->b_p_sw || pp == (long *)&curbuf->b_p_ts) { if (foldmethodIsIndent(curwin)) { foldUpdateAll(curwin); } // When 'shiftwidth' changes, or it's zero and 'tabstop' changes: // parse 'cinoptions'. if (pp == &curbuf->b_p_sw || curbuf->b_p_sw == 0) { parse_cino(curbuf); } } /* 'maxcombine' */ else if (pp == &p_mco) { if (p_mco > MAX_MCO) p_mco = MAX_MCO; else if (p_mco < 0) p_mco = 0; screenclear(); /* will re-allocate the screen */ } else if (pp == &curbuf->b_p_iminsert) { if (curbuf->b_p_iminsert < 0 || curbuf->b_p_iminsert > B_IMODE_LAST) { errmsg = e_invarg; curbuf->b_p_iminsert = B_IMODE_NONE; } p_iminsert = curbuf->b_p_iminsert; showmode(); /* Show/unshow value of 'keymap' in status lines. 
*/ status_redraw_curbuf(); } else if (pp == &p_window) { if (p_window < 1) p_window = 1; else if (p_window >= Rows) p_window = Rows - 1; } else if (pp == &curbuf->b_p_imsearch) { if (curbuf->b_p_imsearch < -1 || curbuf->b_p_imsearch > B_IMODE_LAST) { errmsg = e_invarg; curbuf->b_p_imsearch = B_IMODE_NONE; } p_imsearch = curbuf->b_p_imsearch; } /* if 'titlelen' has changed, redraw the title */ else if (pp == &p_titlelen) { if (p_titlelen < 0) { errmsg = e_positive; p_titlelen = 85; } if (starting != NO_SCREEN && old_value != p_titlelen) need_maketitle = TRUE; } /* if p_ch changed value, change the command line height */ else if (pp == &p_ch) { if (p_ch < 1) { errmsg = e_positive; p_ch = 1; } if (p_ch > Rows - min_rows() + 1) p_ch = Rows - min_rows() + 1; /* Only compute the new window layout when startup has been * completed. Otherwise the frame sizes may be wrong. */ if (p_ch != old_value && full_screen ) command_height(); } /* when 'updatecount' changes from zero to non-zero, open swap files */ else if (pp == &p_uc) { if (p_uc < 0) { errmsg = e_positive; p_uc = 100; } if (p_uc && !old_value) ml_open_files(); } else if (pp == &curwin->w_p_cole) { if (curwin->w_p_cole < 0) { errmsg = e_positive; curwin->w_p_cole = 0; } else if (curwin->w_p_cole > 3) { errmsg = e_invarg; curwin->w_p_cole = 3; } } /* sync undo before 'undolevels' changes */ else if (pp == &p_ul) { /* use the old value, otherwise u_sync() may not work properly */ p_ul = old_value; u_sync(TRUE); p_ul = value; } else if (pp == &curbuf->b_p_ul) { /* use the old value, otherwise u_sync() may not work properly */ curbuf->b_p_ul = old_value; u_sync(TRUE); curbuf->b_p_ul = value; } /* 'numberwidth' must be positive */ else if (pp == &curwin->w_p_nuw) { if (curwin->w_p_nuw < 1) { errmsg = e_positive; curwin->w_p_nuw = 1; } if (curwin->w_p_nuw > 10) { errmsg = e_invarg; curwin->w_p_nuw = 10; } curwin->w_nrwidth_line_count = 0; } else if (pp == &curbuf->b_p_tw) { if (curbuf->b_p_tw < 0) { errmsg = e_positive; 
curbuf->b_p_tw = 0; } FOR_ALL_TAB_WINDOWS(tp, wp) { check_colorcolumn(wp); } } /* * Check the bounds for numeric options here */ if (Rows < min_rows() && full_screen) { if (errbuf != NULL) { vim_snprintf((char *)errbuf, errbuflen, _("E593: Need at least %d lines"), min_rows()); errmsg = errbuf; } Rows = min_rows(); } if (Columns < MIN_COLUMNS && full_screen) { if (errbuf != NULL) { vim_snprintf((char *)errbuf, errbuflen, _("E594: Need at least %d columns"), MIN_COLUMNS); errmsg = errbuf; } Columns = MIN_COLUMNS; } limit_screen_size(); /* * If the screen (shell) height has been changed, assume it is the * physical screenheight. */ if (old_Rows != Rows || old_Columns != Columns) { /* Changing the screen size is not allowed while updating the screen. */ if (updating_screen) { *pp = old_value; } else if (full_screen) { screen_resize((int)Columns, (int)Rows); } else { /* Postpone the resizing; check the size and cmdline position for * messages. */ check_shellsize(); if (cmdline_row > Rows - p_ch && Rows > p_ch) { assert(p_ch >= 0 && Rows - p_ch <= INT_MAX); cmdline_row = (int)(Rows - p_ch); } } if (p_window >= Rows || !option_was_set((char_u *)"window")) p_window = Rows - 1; } if (curbuf->b_p_ts <= 0) { errmsg = e_positive; curbuf->b_p_ts = 8; } if (p_tm < 0) { errmsg = e_positive; p_tm = 0; } if ((curwin->w_p_scr <= 0 || (curwin->w_p_scr > curwin->w_height && curwin->w_height > 0)) && full_screen) { if (pp == &(curwin->w_p_scr)) { if (curwin->w_p_scr != 0) errmsg = e_scroll; win_comp_scroll(curwin); } /* If 'scroll' became invalid because of a side effect silently adjust * it. 
*/ else if (curwin->w_p_scr <= 0) curwin->w_p_scr = 1; else /* curwin->w_p_scr > curwin->w_height */ curwin->w_p_scr = curwin->w_height; } if (p_hi < 0) { errmsg = e_positive; p_hi = 0; } else if (p_hi > 10000) { errmsg = e_invarg; p_hi = 10000; } if (p_re < 0 || p_re > 2) { errmsg = e_invarg; p_re = 0; } if (p_report < 0) { errmsg = e_positive; p_report = 1; } if ((p_sj < -100 || p_sj >= Rows) && full_screen) { if (Rows != old_Rows) /* Rows changed, just adjust p_sj */ p_sj = Rows / 2; else { errmsg = e_scroll; p_sj = 1; } } if (p_so < 0 && full_screen) { errmsg = e_scroll; p_so = 0; } if (p_siso < 0 && full_screen) { errmsg = e_positive; p_siso = 0; } if (p_cwh < 1) { errmsg = e_positive; p_cwh = 1; } if (p_ut < 0) { errmsg = e_positive; p_ut = 2000; } if (p_ss < 0) { errmsg = e_positive; p_ss = 0; } /* May set global value for local option. */ if ((opt_flags & (OPT_LOCAL | OPT_GLOBAL)) == 0) *(long *)get_varp_scope(&(options[opt_idx]), OPT_GLOBAL) = *pp; options[opt_idx].flags |= P_WAS_SET; if (!starting && errmsg == NULL) { char buf_old[NUMBUFLEN]; char buf_new[NUMBUFLEN]; char buf_type[7]; vim_snprintf(buf_old, ARRAY_SIZE(buf_old), "%ld", old_value); vim_snprintf(buf_new, ARRAY_SIZE(buf_new), "%ld", value); vim_snprintf(buf_type, ARRAY_SIZE(buf_type), "%s", (opt_flags & OPT_LOCAL) ? "local" : "global"); set_vim_var_string(VV_OPTION_NEW, buf_new, -1); set_vim_var_string(VV_OPTION_OLD, buf_old, -1); set_vim_var_string(VV_OPTION_TYPE, buf_type, -1); apply_autocmds(EVENT_OPTIONSET, (char_u *) options[opt_idx].fullname, NULL, false, NULL); reset_v_option_vars(); } comp_col(); /* in case 'columns' or 'ls' changed */ if (curwin->w_curswant != MAXCOL && (options[opt_idx].flags & (P_CURSWANT | P_RALL)) != 0) curwin->w_set_curswant = TRUE; check_redraw(options[opt_idx].flags); return errmsg; } /* * Called after an option changed: check if something needs to be redrawn. 
*/
static void check_redraw(uint32_t flags)
{
  /* Careful: P_RCLR and P_RALL are a combination of other P_ flags */
  bool doclear = (flags & P_RCLR) == P_RCLR;
  bool all = ((flags & P_RALL) == P_RALL || doclear);

  if ((flags & P_RSTAT) || all)     /* mark all status lines dirty */
    status_redraw_all();

  if ((flags & P_RBUF) || (flags & P_RWIN) || all)
    changed_window_setting();
  if (flags & P_RBUF)
    redraw_curbuf_later(NOT_VALID);
  if (doclear)                      /* P_RCLR: clear the screen and redraw */
    redraw_all_later(CLEAR);
  else if (all)                     /* P_RALL: redraw everything, no clear */
    redraw_all_later(NOT_VALID);
}

/// Find index for named option
///
/// @param[in]  arg  Option to find index for.
/// @param[in]  len  Length of the option.
///
/// @return Index of the option or -1 if option was not found.
int findoption_len(const char_u *const arg, const size_t len)
{
  char *s, *p;
  static int quick_tab[27] = { 0, 0 };  // quick access table, built lazily
  int is_term_opt;

  /*
   * For first call: Initialize the quick-access table.
   * It contains the index for the first option that starts with a certain
   * letter.  There are 26 letters, plus the first "t_" option.
   */
  if (quick_tab[1] == 0) {
    p = options[0].fullname;
    for (short int i = 1; (s = options[i].fullname) != NULL; i++) {
      if (s[0] != p[0]) {
        if (s[0] == 't' && s[1] == '_')
          quick_tab[26] = i;            /* slot 26 marks the "t_" group */
        else
          quick_tab[CharOrdLow(s[0])] = i;
      }
      p = s;
    }
  }

  /*
   * Check for name starting with an illegal character.
   */
  if (len == 0 || arg[0] < 'a' || arg[0] > 'z') {
    return -1;
  }

  int opt_idx;
  is_term_opt = (len > 2 && arg[0] == 't' && arg[1] == '_');
  if (is_term_opt) {
    opt_idx = quick_tab[26];
  } else {
    opt_idx = quick_tab[CharOrdLow(arg[0])];
  }
  // Match full name
  for (; (s = options[opt_idx].fullname) != NULL; opt_idx++) {
    if (STRNCMP(arg, s, len) == 0 && s[len] == NUL) {
      break;
    }
  }
  if (s == NULL && !is_term_opt) {
    /* No full-name match: rescan the same letter group for a short name. */
    opt_idx = quick_tab[CharOrdLow(arg[0])];
    // Match short name
    for (; options[opt_idx].fullname != NULL; opt_idx++) {
      s = options[opt_idx].shortname;
      if (s != NULL && STRNCMP(arg, s, len) == 0 && s[len] == NUL) {
        break;
      }
      s = NULL;  /* so "no match" is detectable after the loop */
    }
  }
  if (s == NULL)
    opt_idx = -1;
  return opt_idx;
}

/// @return true if "name" is a tty-related option: it starts with "t_" or
///         equals "term".
bool is_tty_option(char *name)
{
  return (name[0] == 't' && name[1] == '_') || !strcmp((char *)name, "term");
}

#define TCO_BUFFER_SIZE 8
/// Handle reading of tty pseudo options ("t_Co", "term", "ttytype", "t_*").
///
/// @param      name   Option name.
/// @param[out] value  When non-NULL, receives a newly allocated string
///                    (caller frees).  "t_Co" yields the color count (empty
///                    when t_colors <= 1), "term"/"ttytype" yield "nvim",
///                    other "t_*" options yield an empty string.
///
/// @return true if "name" was a tty option and was handled here.
bool get_tty_option(char *name, char **value)
{
  if (!strcmp(name, "t_Co")) {
    if (value) {
      if (t_colors <= 1) {
        *value = xstrdup("");
      } else {
        *value = xmalloc(TCO_BUFFER_SIZE);
        snprintf(*value, TCO_BUFFER_SIZE, "%d", t_colors);
      }
    }
    return true;
  }

  if (!strcmp(name, "term") || !strcmp(name, "ttytype")) {
    if (value) {
      *value = xstrdup("nvim");
    }
    return true;
  }

  if (is_tty_option(name)) {
    if (value) {
      // XXX: All other t_* options were removed in 3baba1e7.
      *value = xstrdup("");
    }
    return true;
  }

  return false;
}

/// Handle writing of tty pseudo options.  Only "t_Co" has an effect: it
/// updates t_colors and reinitializes highlighting when the value changed.
///
/// @return true if "name" was a tty option (handled or silently ignored).
bool set_tty_option(char *name, char *value)
{
  if (!strcmp(name, "t_Co")) {
    int colors = atoi(value);  /* non-numeric input becomes 0 */

    // Only reinitialize colors if t_Co value has really changed to
    // avoid expensive reload of colorscheme if t_Co is set to the
    // same value multiple times
    if (colors != t_colors) {
      t_colors = colors;
      // We now have a different color setup, initialize it again.
      init_highlight(TRUE, FALSE);
    }

    return true;
  }

  return is_tty_option(name) || !strcmp(name, "term")
         || !strcmp(name, "ttytype");
}

/*
 * Find index for option 'arg'.
 * Return -1 if not found.
*/
static int findoption(char_u *arg)
{
  return findoption_len(arg, STRLEN(arg));
}

/*
 * Get the value for an option.
 *
 * Returns:
 * Number or Toggle option: 1, *numval gets value.
 * String option: 0, *stringval gets allocated string.
 * Hidden Number or Toggle option: -1.
 * hidden String option: -2.
 * unknown option: -3.
 */
int get_option_value (
    char_u *name,
    long *numval,
    char_u **stringval,             /* NULL when only checking existence */
    int opt_flags
)
{
  /* tty pseudo options ("t_*", "term", "ttytype") bypass the table */
  if (get_tty_option((char *)name, (char **)stringval)) {
    return 0;
  }

  int opt_idx;
  char_u *varp;

  opt_idx = findoption(name);
  if (opt_idx < 0)                  /* unknown option */
    return -3;

  varp = get_varp_scope(&(options[opt_idx]), opt_flags);

  if (options[opt_idx].flags & P_STRING) {
    if (varp == NULL)               /* hidden option */
      return -2;
    if (stringval != NULL) {
      /* caller owns (and must free) the returned copy */
      *stringval = vim_strsave(*(char_u **)(varp));
    }
    return 0;
  }

  if (varp == NULL)                 /* hidden option */
    return -1;
  if (options[opt_idx].flags & P_NUM)
    *numval = *(long *)varp;
  else {
    /* Special case: 'modified' is b_changed, but we also want to consider
     * it set when 'ff' or 'fenc' changed. */
    if ((int *)varp == &curbuf->b_changed) {
      *numval = curbufIsChanged();
    } else {
      *numval = *(int *)varp;
    }
  }
  return 1;
}

// Returns the option attributes and its value. Unlike the above function it
// will return either global value or local value of the option depending on
// what was requested, but it will never return global value if it was
// requested to return local one and vice versa. Nor will it return a
// buffer-local value if it was requested to return a window-local one.
//
// Pretends that option is absent if it is not present in the requested scope
// (i.e. has no global, window-local or buffer-local value depending on
// opt_type).
//
// Returned flags:
// 0 hidden or unknown option, also option that does not have requested
// type (see SREQ_* in option_defs.h)
// see SOPT_* in option_defs.h for other flags
//
// Possible opt_type values: see SREQ_* in option_defs.h
int get_option_value_strict(char *name,
                            int64_t *numval,
                            char **stringval,
                            int opt_type,
                            void *from)
{
  if (get_tty_option(name, stringval)) {
    return SOPT_STRING | SOPT_GLOBAL;
  }

  char_u *varp = NULL;
  vimoption_T *p;
  int rv = 0;
  int opt_idx = findoption((uint8_t *)name);
  if (opt_idx < 0) {
    return 0;
  }

  p = &(options[opt_idx]);

  // Hidden option
  if (p->var == NULL) {
    return 0;
  }

  /* report the option's type */
  if (p->flags & P_BOOL) {
    rv |= SOPT_BOOL;
  } else if (p->flags & P_NUM) {
    rv |= SOPT_NUM;
  } else if (p->flags & P_STRING) {
    rv |= SOPT_STRING;
  }

  /* check that the option exists in the requested scope */
  if (p->indir == PV_NONE) {
    if (opt_type == SREQ_GLOBAL)
      rv |= SOPT_GLOBAL;
    else
      return 0;  // Did not request global-only option
  } else {
    if (p->indir & PV_BOTH) {
      rv |= SOPT_GLOBAL;
    } else if (opt_type == SREQ_GLOBAL) {
      return 0;  // Requested global option
    }
    if (p->indir & PV_WIN) {
      if (opt_type == SREQ_BUF) {
        return 0;  // Did not request window-local option
      } else {
        rv |= SOPT_WIN;
      }
    } else if (p->indir & PV_BUF) {
      if (opt_type == SREQ_WIN) {
        return 0;  // Did not request buffer-local option
      } else {
        rv |= SOPT_BUF;
      }
    }
  }

  if (stringval == NULL) {
    return rv;  /* caller only wanted the attribute flags */
  }

  if (opt_type == SREQ_GLOBAL) {
    varp = p->var;
  } else {
    if (opt_type == SREQ_BUF) {
      // Special case: 'modified' is b_changed, but we also want to
      // consider it set when 'ff' or 'fenc' changed.
if (p->indir == PV_MOD) { *numval = bufIsChanged((buf_T *) from); varp = NULL; } else { aco_save_T aco; aucmd_prepbuf(&aco, (buf_T *) from); varp = get_varp(p); aucmd_restbuf(&aco); } } else if (opt_type == SREQ_WIN) { win_T *save_curwin; save_curwin = curwin; curwin = (win_T *) from; curbuf = curwin->w_buffer; varp = get_varp(p); curwin = save_curwin; curbuf = curwin->w_buffer; } if (varp == p->var) { return (rv | SOPT_UNSET); } } if (varp != NULL) { if (p->flags & P_STRING) { *stringval = xstrdup(*(char **)(varp)); } else if (p->flags & P_NUM) { *numval = *(long *) varp; } else { *numval = *(int *)varp; } } return rv; } /* * Set the value of option "name". * Use "string" for string options, use "number" for other options. * * Returns NULL on success or error message on error. */ char_u * set_option_value ( char_u *name, long number, char_u *string, int opt_flags /* OPT_LOCAL or 0 (both) */ ) { if (set_tty_option((char *)name, (char *)string)) { return NULL; } int opt_idx; char_u *varp; opt_idx = findoption(name); if (opt_idx < 0) EMSG2(_("E355: Unknown option: %s"), name); else { uint32_t flags = options[opt_idx].flags; // Disallow changing some options in the sandbox if (sandbox > 0 && (flags & P_SECURE)) { EMSG(_(e_sandbox)); return NULL; } if (flags & P_STRING) { const char *s = (const char *)string; if (s == NULL) { s = ""; } return (char_u *)set_string_option(opt_idx, s, opt_flags); } else { varp = get_varp_scope(&(options[opt_idx]), opt_flags); if (varp != NULL) { /* hidden option is not changed */ if (number == 0 && string != NULL) { int idx; // Either we are given a string or we are setting option // to zero. for (idx = 0; string[idx] == '0'; idx++) {} if (string[idx] != NUL || idx == 0) { // There's another character after zeros or the string // is empty. In both cases, we are trying to set a // num option using a string. 
EMSG3(_("E521: Number required: &%s = '%s'"), name, string); return NULL; // do nothing as we hit an error } } if (flags & P_NUM) return set_num_option(opt_idx, varp, number, NULL, 0, opt_flags); else return set_bool_option(opt_idx, varp, (int)number, opt_flags); } } } return NULL; } char_u *get_highlight_default(void) { int i; i = findoption((char_u *)"hl"); if (i >= 0) return options[i].def_val[VI_DEFAULT]; return (char_u *)NULL; } /* * Translate a string like "t_xx", "<t_xx>" or "<S-Tab>" to a key number. */ int find_key_option_len(const char_u *arg, size_t len) { int key; int modifiers; // Don't use get_special_key_code() for t_xx, we don't want it to call // add_termcap_entry(). if (len >= 4 && arg[0] == 't' && arg[1] == '_') { key = TERMCAP2KEY(arg[2], arg[3]); } else { arg--; // put arg at the '<' modifiers = 0; key = find_special_key(&arg, len + 1, &modifiers, true, true); if (modifiers) { // can't handle modifiers here key = 0; } } return key; } static int find_key_option(const char_u *arg) { return find_key_option_len(arg, STRLEN(arg)); } /* * if 'all' == 0: show changed options * if 'all' == 1: show all normal options */ static void showoptions ( int all, int opt_flags /* OPT_LOCAL and/or OPT_GLOBAL */ ) { vimoption_T *p; int col; char_u *varp; int item_count; int run; int row, rows; int cols; int i; int len; #define INC 20 #define GAP 3 vimoption_T **items = xmalloc(sizeof(vimoption_T *) * PARAM_COUNT); /* Highlight title */ if (all == 2) MSG_PUTS_TITLE(_("\n--- Terminal codes ---")); else if (opt_flags & OPT_GLOBAL) MSG_PUTS_TITLE(_("\n--- Global option values ---")); else if (opt_flags & OPT_LOCAL) MSG_PUTS_TITLE(_("\n--- Local option values ---")); else MSG_PUTS_TITLE(_("\n--- Options ---")); /* * do the loop two times: * 1. display the short items * 2. 
display the long items (only strings and numbers) */ for (run = 1; run <= 2 && !got_int; ++run) { /* * collect the items in items[] */ item_count = 0; for (p = &options[0]; p->fullname != NULL; p++) { varp = NULL; if (opt_flags != 0) { if (p->indir != PV_NONE) varp = get_varp_scope(p, opt_flags); } else varp = get_varp(p); if (varp != NULL && (all == 1 || (all == 0 && !optval_default(p, varp)))) { if (p->flags & P_BOOL) len = 1; /* a toggle option fits always */ else { option_value2string(p, opt_flags); len = (int)STRLEN(p->fullname) + vim_strsize(NameBuff) + 1; } if ((len <= INC - GAP && run == 1) || (len > INC - GAP && run == 2)) { items[item_count++] = p; } } } /* * display the items */ if (run == 1) { assert(Columns <= LONG_MAX - GAP && Columns + GAP >= LONG_MIN + 3 && (Columns + GAP - 3) / INC >= INT_MIN && (Columns + GAP - 3) / INC <= INT_MAX); cols = (int)((Columns + GAP - 3) / INC); if (cols == 0) cols = 1; rows = (item_count + cols - 1) / cols; } else /* run == 2 */ rows = item_count; for (row = 0; row < rows && !got_int; ++row) { msg_putchar('\n'); /* go to next line */ if (got_int) /* 'q' typed in more */ break; col = 0; for (i = row; i < item_count; i += rows) { msg_col = col; /* make columns */ showoneopt(items[i], opt_flags); col += INC; } ui_flush(); os_breakcheck(); } } xfree(items); } /* * Return TRUE if option "p" has its default value. */ static int optval_default(vimoption_T *p, char_u *varp) { int dvi; if (varp == NULL) return TRUE; /* hidden option is always at default */ dvi = ((p->flags & P_VI_DEF) || p_cp) ? VI_DEFAULT : VIM_DEFAULT; if (p->flags & P_NUM) return *(long *)varp == (long)p->def_val[dvi]; if (p->flags & P_BOOL) return *(int *)varp == (int)(intptr_t)p->def_val[dvi]; /* P_STRING */ return STRCMP(*(char_u **)varp, p->def_val[dvi]) == 0; } /* * showoneopt: show the value of one option * must not be called with a hidden option! 
*/
static void
showoneopt (
    vimoption_T *p,
    int opt_flags                   /* OPT_LOCAL or OPT_GLOBAL */
)
{
  char_u *varp;
  int save_silent = silent_mode;

  silent_mode = FALSE;
  info_message = TRUE;          /* use mch_msg(), not mch_errmsg() */

  varp = get_varp_scope(p, opt_flags);

  // for 'modified' we also need to check if 'ff' or 'fenc' changed.
  if ((p->flags & P_BOOL) && ((int *)varp == &curbuf->b_changed
                              ? !curbufIsChanged() : !*(int *)varp)) {
    MSG_PUTS("no");
  } else if ((p->flags & P_BOOL) && *(int *)varp < 0) {
    /* negative bool: global/local option currently using the global value */
    MSG_PUTS("--");
  } else {
    MSG_PUTS("  ");
  }
  MSG_PUTS(p->fullname);
  if (!(p->flags & P_BOOL)) {
    msg_putchar('=');
    /* put value string in NameBuff */
    option_value2string(p, opt_flags);
    msg_outtrans(NameBuff);
  }

  silent_mode = save_silent;
  info_message = FALSE;
}

/*
 * Write modified options as ":set" commands to a file.
 *
 * There are three values for "opt_flags":
 * OPT_GLOBAL:             Write global option values and fresh values of
 *                         buffer-local options (used for start of a session
 *                         file).
 * OPT_GLOBAL + OPT_LOCAL: Idem, add fresh values of window-local options for
 *                         curwin (used for a vimrc file).
 * OPT_LOCAL:              Write buffer-local option values for curbuf, fresh
 *                         and local values for window-local options of
 *                         curwin.  Local values are also written when at the
 *                         default value, because a modeline or autocommand
 *                         may have set them when doing ":edit file" and the
 *                         user has set them back at the default or fresh
 *                         value.
 *                         When "local_only" is TRUE, don't write fresh
 *                         values, only local values (for ":mkview").
 * (fresh value = value used for a new buffer or window for a local option).
 *
 * Return FAIL on error, OK otherwise.
 */
int makeset(FILE *fd, int opt_flags, int local_only)
{
  vimoption_T *p;
  char_u *varp;                 /* currently used value */
  char_u *varp_fresh;           /* fresh (global-scope) value */
  char_u *varp_local = NULL;    /* local value */
  char *cmd;
  int round;
  int pri;

  /*
   * Some options are never written:
   * - Options that don't have a default (terminal name, columns, lines).
   * - Terminal options.
   * - Hidden options.
   *
   * Do the loop over "options[]" twice: once for options with the
   * P_PRI_MKRC flag and once without.
   */
  for (pri = 1; pri >= 0; --pri) {
    for (p = &options[0]; p->fullname; p++)
      if (!(p->flags & P_NO_MKRC)
          && ((pri == 1) == ((p->flags & P_PRI_MKRC) != 0))) {
        /* skip global option when only doing locals */
        if (p->indir == PV_NONE && !(opt_flags & OPT_GLOBAL))
          continue;

        /* Do not store options like 'bufhidden' and 'syntax' in a vimrc
         * file, they are always buffer-specific. */
        if ((opt_flags & OPT_GLOBAL) && (p->flags & P_NOGLOB))
          continue;

        varp = get_varp_scope(p, opt_flags);
        /* Hidden options are never written. */
        if (!varp)
          continue;
        /* Global values are only written when not at the default value. */
        if ((opt_flags & OPT_GLOBAL) && optval_default(p, varp))
          continue;

        round = 2;
        if (p->indir != PV_NONE) {
          if (p->var == VAR_WIN) {
            /* skip window-local option when only doing globals */
            if (!(opt_flags & OPT_LOCAL))
              continue;
            /* When fresh value of window-local option is not at the
             * default, need to write it too. */
            if (!(opt_flags & OPT_GLOBAL) && !local_only) {
              varp_fresh = get_varp_scope(p, OPT_GLOBAL);
              if (!optval_default(p, varp_fresh)) {
                round = 1;          /* write fresh value first, then local */
                varp_local = varp;
                varp = varp_fresh;
              }
            }
          }
        }

        /* Round 1: fresh value for window-local options.
         * Round 2: other values */
        for (; round <= 2; varp = varp_local, ++round) {
          if (round == 1 || (opt_flags & OPT_GLOBAL))
            cmd = "set";
          else
            cmd = "setlocal";

          if (p->flags & P_BOOL) {
            if (put_setbool(fd, cmd, p->fullname, *(int *)varp) == FAIL)
              return FAIL;
          } else if (p->flags & P_NUM) {
            if (put_setnum(fd, cmd, p->fullname, (long *)varp) == FAIL)
              return FAIL;
          } else {    /* P_STRING */
            int do_endif = FALSE;

            // Don't set 'syntax' and 'filetype' again if the value is
            // already right, avoids reloading the syntax file.
if (p->indir == PV_SYN || p->indir == PV_FT) { if (fprintf(fd, "if &%s != '%s'", p->fullname, *(char_u **)(varp)) < 0 || put_eol(fd) < 0) { return FAIL; } do_endif = true; } if (put_setstring(fd, cmd, p->fullname, (char_u **)varp, (p->flags & P_EXPAND) != 0) == FAIL) return FAIL; if (do_endif) { if (put_line(fd, "endif") == FAIL) return FAIL; } } } } } return OK; } /* * Generate set commands for the local fold options only. Used when * 'sessionoptions' or 'viewoptions' contains "folds" but not "options". */ int makefoldset(FILE *fd) { if (put_setstring(fd, "setlocal", "fdm", &curwin->w_p_fdm, FALSE) == FAIL || put_setstring(fd, "setlocal", "fde", &curwin->w_p_fde, FALSE) == FAIL || put_setstring(fd, "setlocal", "fmr", &curwin->w_p_fmr, FALSE) == FAIL || put_setstring(fd, "setlocal", "fdi", &curwin->w_p_fdi, FALSE) == FAIL || put_setnum(fd, "setlocal", "fdl", &curwin->w_p_fdl) == FAIL || put_setnum(fd, "setlocal", "fml", &curwin->w_p_fml) == FAIL || put_setnum(fd, "setlocal", "fdn", &curwin->w_p_fdn) == FAIL || put_setbool(fd, "setlocal", "fen", curwin->w_p_fen) == FAIL ) return FAIL; return OK; } static int put_setstring(FILE *fd, char *cmd, char *name, char_u **valuep, int expand) { char_u *s; char_u *buf; if (fprintf(fd, "%s %s=", cmd, name) < 0) return FAIL; if (*valuep != NULL) { /* Output 'pastetoggle' as key names. 
For other * options some characters have to be escaped with * CTRL-V or backslash */ if (valuep == &p_pt) { s = *valuep; while (*s != NUL) if (put_escstr(fd, str2special(&s, FALSE), 2) == FAIL) return FAIL; } else if (expand) { buf = xmalloc(MAXPATHL); home_replace(NULL, *valuep, buf, MAXPATHL, FALSE); if (put_escstr(fd, buf, 2) == FAIL) { xfree(buf); return FAIL; } xfree(buf); } else if (put_escstr(fd, *valuep, 2) == FAIL) return FAIL; } if (put_eol(fd) < 0) return FAIL; return OK; } static int put_setnum(FILE *fd, char *cmd, char *name, long *valuep) { long wc; if (fprintf(fd, "%s %s=", cmd, name) < 0) return FAIL; if (wc_use_keyname((char_u *)valuep, &wc)) { /* print 'wildchar' and 'wildcharm' as a key name */ if (fputs((char *)get_special_key_name((int)wc, 0), fd) < 0) return FAIL; } else if (fprintf(fd, "%" PRId64, (int64_t)*valuep) < 0) return FAIL; if (put_eol(fd) < 0) return FAIL; return OK; } static int put_setbool(FILE *fd, char *cmd, char *name, int value) { if (value < 0) /* global/local option using global value */ return OK; if (fprintf(fd, "%s %s%s", cmd, value ? "" : "no", name) < 0 || put_eol(fd) < 0) return FAIL; return OK; } /* * Compute columns for ruler and shown command. 'sc_col' is also used to * decide what the maximum length of a message on the status line can be. * If there is a status line for the last window, 'sc_col' is independent * of 'ru_col'. */ #define COL_RULER 17 /* columns needed by standard ruler */ void comp_col(void) { int last_has_status = (p_ls == 2 || (p_ls == 1 && firstwin != lastwin)); sc_col = 0; ru_col = 0; if (p_ru) { ru_col = (ru_wid ? 
ru_wid : COL_RULER) + 1; /* no last status line, adjust sc_col */ if (!last_has_status) sc_col = ru_col; } if (p_sc) { sc_col += SHOWCMD_COLS; if (!p_ru || last_has_status) /* no need for separating space */ ++sc_col; } assert(sc_col >= 0 && INT_MIN + sc_col <= Columns && Columns - sc_col <= INT_MAX); sc_col = (int)(Columns - sc_col); assert(ru_col >= 0 && INT_MIN + ru_col <= Columns && Columns - ru_col <= INT_MAX); ru_col = (int)(Columns - ru_col); if (sc_col <= 0) /* screen too narrow, will become a mess */ sc_col = 1; if (ru_col <= 0) ru_col = 1; } // Unset local option value, similar to ":set opt<". void unset_global_local_option(char *name, void *from) { vimoption_T *p; buf_T *buf = (buf_T *)from; int opt_idx = findoption((uint8_t *)name); if (opt_idx < 0) { EMSG2(_("E355: Unknown option: %s"), name); return; } p = &(options[opt_idx]); switch ((int)p->indir) { // global option with local value: use local value if it's been set case PV_EP: clear_string_option(&buf->b_p_ep); break; case PV_KP: clear_string_option(&buf->b_p_kp); break; case PV_PATH: clear_string_option(&buf->b_p_path); break; case PV_AR: buf->b_p_ar = -1; break; case PV_BKC: clear_string_option(&buf->b_p_bkc); buf->b_bkc_flags = 0; break; case PV_TAGS: clear_string_option(&buf->b_p_tags); break; case PV_TC: clear_string_option(&buf->b_p_tc); buf->b_tc_flags = 0; break; case PV_DEF: clear_string_option(&buf->b_p_def); break; case PV_INC: clear_string_option(&buf->b_p_inc); break; case PV_DICT: clear_string_option(&buf->b_p_dict); break; case PV_TSR: clear_string_option(&buf->b_p_tsr); break; case PV_EFM: clear_string_option(&buf->b_p_efm); break; case PV_GP: clear_string_option(&buf->b_p_gp); break; case PV_MP: clear_string_option(&buf->b_p_mp); break; case PV_STL: clear_string_option(&((win_T *)from)->w_p_stl); break; case PV_UL: buf->b_p_ul = NO_LOCAL_UNDOLEVEL; break; case PV_LW: clear_string_option(&buf->b_p_lw); break; } } /* * Get pointer to option variable, depending on local or global 
scope.
 */
static char_u *get_varp_scope(vimoption_T *p, int opt_flags)
{
  if ((opt_flags & OPT_GLOBAL) && p->indir != PV_NONE) {
    if (p->var == VAR_WIN)
      return (char_u *)GLOBAL_WO(get_varp(p));
    return p->var;
  }
  if ((opt_flags & OPT_LOCAL) && ((int)p->indir & PV_BOTH)) {
    /* Option is both global and local: return the local slot even when
     * it has not been set (caller decides what "unset" means). */
    switch ((int)p->indir) {
    case PV_EFM:  return (char_u *)&(curbuf->b_p_efm);
    case PV_GP:   return (char_u *)&(curbuf->b_p_gp);
    case PV_MP:   return (char_u *)&(curbuf->b_p_mp);
    case PV_EP:   return (char_u *)&(curbuf->b_p_ep);
    case PV_KP:   return (char_u *)&(curbuf->b_p_kp);
    case PV_PATH: return (char_u *)&(curbuf->b_p_path);
    case PV_AR:   return (char_u *)&(curbuf->b_p_ar);
    case PV_TAGS: return (char_u *)&(curbuf->b_p_tags);
    case PV_TC:   return (char_u *)&(curbuf->b_p_tc);
    case PV_DEF:  return (char_u *)&(curbuf->b_p_def);
    case PV_INC:  return (char_u *)&(curbuf->b_p_inc);
    case PV_DICT: return (char_u *)&(curbuf->b_p_dict);
    case PV_TSR:  return (char_u *)&(curbuf->b_p_tsr);
    case PV_STL:  return (char_u *)&(curwin->w_p_stl);
    case PV_UL:   return (char_u *)&(curbuf->b_p_ul);
    case PV_LW:   return (char_u *)&(curbuf->b_p_lw);
    case PV_BKC:  return (char_u *)&(curbuf->b_p_bkc);
    }
    return NULL;     /* "cannot happen" */
  }
  return get_varp(p);
}

/*
 * Get pointer to option variable.
 */
static char_u *get_varp(vimoption_T *p)
{
  /* hidden option, always return NULL */
  if (p->var == NULL)
    return NULL;

  switch ((int)p->indir) {
  case PV_NONE:   return p->var;

  /* global option with local value: use local value if it's been set */
  case PV_EP:     return *curbuf->b_p_ep != NUL
                         ? (char_u *)&curbuf->b_p_ep : p->var;
  case PV_KP:     return *curbuf->b_p_kp != NUL
                         ? (char_u *)&curbuf->b_p_kp : p->var;
  case PV_PATH:   return *curbuf->b_p_path != NUL
                         ? (char_u *)&(curbuf->b_p_path) : p->var;
  case PV_AR:     return curbuf->b_p_ar >= 0
                         ? (char_u *)&(curbuf->b_p_ar) : p->var;
  case PV_TAGS:   return *curbuf->b_p_tags != NUL
                         ? (char_u *)&(curbuf->b_p_tags) : p->var;
  case PV_TC:     return *curbuf->b_p_tc != NUL
                         ? (char_u *)&(curbuf->b_p_tc) : p->var;
  case PV_BKC:    return *curbuf->b_p_bkc != NUL
                         ? (char_u *)&(curbuf->b_p_bkc) : p->var;
  case PV_DEF:    return *curbuf->b_p_def != NUL
                         ? (char_u *)&(curbuf->b_p_def) : p->var;
  case PV_INC:    return *curbuf->b_p_inc != NUL
                         ? (char_u *)&(curbuf->b_p_inc) : p->var;
  case PV_DICT:   return *curbuf->b_p_dict != NUL
                         ? (char_u *)&(curbuf->b_p_dict) : p->var;
  case PV_TSR:    return *curbuf->b_p_tsr != NUL
                         ? (char_u *)&(curbuf->b_p_tsr) : p->var;
  case PV_EFM:    return *curbuf->b_p_efm != NUL
                         ? (char_u *)&(curbuf->b_p_efm) : p->var;
  case PV_GP:     return *curbuf->b_p_gp != NUL
                         ? (char_u *)&(curbuf->b_p_gp) : p->var;
  case PV_MP:     return *curbuf->b_p_mp != NUL
                         ? (char_u *)&(curbuf->b_p_mp) : p->var;
  case PV_STL:    return *curwin->w_p_stl != NUL
                         ? (char_u *)&(curwin->w_p_stl) : p->var;
  case PV_UL:     return curbuf->b_p_ul != NO_LOCAL_UNDOLEVEL
                         ? (char_u *)&(curbuf->b_p_ul) : p->var;
  case PV_LW:     return *curbuf->b_p_lw != NUL
                         ? (char_u *)&(curbuf->b_p_lw) : p->var;

  /* window-local options */
  case PV_ARAB:   return (char_u *)&(curwin->w_p_arab);
  case PV_LIST:   return (char_u *)&(curwin->w_p_list);
  case PV_SPELL:  return (char_u *)&(curwin->w_p_spell);
  case PV_CUC:    return (char_u *)&(curwin->w_p_cuc);
  case PV_CUL:    return (char_u *)&(curwin->w_p_cul);
  case PV_CC:     return (char_u *)&(curwin->w_p_cc);
  case PV_DIFF:   return (char_u *)&(curwin->w_p_diff);
  case PV_FDC:    return (char_u *)&(curwin->w_p_fdc);
  case PV_FEN:    return (char_u *)&(curwin->w_p_fen);
  case PV_FDI:    return (char_u *)&(curwin->w_p_fdi);
  case PV_FDL:    return (char_u *)&(curwin->w_p_fdl);
  case PV_FDM:    return (char_u *)&(curwin->w_p_fdm);
  case PV_FML:    return (char_u *)&(curwin->w_p_fml);
  case PV_FDN:    return (char_u *)&(curwin->w_p_fdn);
  case PV_FDE:    return (char_u *)&(curwin->w_p_fde);
  case PV_FDT:    return (char_u *)&(curwin->w_p_fdt);
  case PV_FMR:    return (char_u *)&(curwin->w_p_fmr);
  case PV_NU:     return (char_u *)&(curwin->w_p_nu);
  case PV_RNU:    return (char_u *)&(curwin->w_p_rnu);
  case PV_NUW:    return (char_u *)&(curwin->w_p_nuw);
  case PV_WFH:    return (char_u *)&(curwin->w_p_wfh);
  case PV_WFW:    return (char_u *)&(curwin->w_p_wfw);
  case PV_PVW:    return (char_u *)&(curwin->w_p_pvw);
  case PV_RL:     return (char_u *)&(curwin->w_p_rl);
  case PV_RLC:    return (char_u *)&(curwin->w_p_rlc);
  case PV_SCROLL: return (char_u *)&(curwin->w_p_scr);
  case PV_WRAP:   return (char_u *)&(curwin->w_p_wrap);
  case PV_LBR:    return (char_u *)&(curwin->w_p_lbr);
  case PV_BRI:    return (char_u *)&(curwin->w_p_bri);
  case PV_BRIOPT: return (char_u *)&(curwin->w_p_briopt);
  case PV_SCBIND: return (char_u *)&(curwin->w_p_scb);
  case PV_CRBIND: return (char_u *)&(curwin->w_p_crb);
  case PV_COCU:   return (char_u *)&(curwin->w_p_cocu);
  case PV_COLE:   return (char_u *)&(curwin->w_p_cole);

  /* buffer-local options */
  case PV_AI:     return (char_u *)&(curbuf->b_p_ai);
  case PV_BIN:    return (char_u *)&(curbuf->b_p_bin);
  case PV_BOMB:   return (char_u *)&(curbuf->b_p_bomb);
  case PV_BH:     return (char_u *)&(curbuf->b_p_bh);
  case PV_BT:     return (char_u *)&(curbuf->b_p_bt);
  case PV_BL:     return (char_u *)&(curbuf->b_p_bl);
  case PV_CI:     return (char_u *)&(curbuf->b_p_ci);
  case PV_CIN:    return (char_u *)&(curbuf->b_p_cin);
  case PV_CINK:   return (char_u *)&(curbuf->b_p_cink);
  case PV_CINO:   return (char_u *)&(curbuf->b_p_cino);
  case PV_CINW:   return (char_u *)&(curbuf->b_p_cinw);
  case PV_COM:    return (char_u *)&(curbuf->b_p_com);
  case PV_CMS:    return (char_u *)&(curbuf->b_p_cms);
  case PV_CPT:    return (char_u *)&(curbuf->b_p_cpt);
  case PV_CFU:    return (char_u *)&(curbuf->b_p_cfu);
  case PV_OFU:    return (char_u *)&(curbuf->b_p_ofu);
  case PV_EOL:    return (char_u *)&(curbuf->b_p_eol);
  case PV_FIXEOL: return (char_u *)&(curbuf->b_p_fixeol);
  case PV_ET:     return (char_u *)&(curbuf->b_p_et);
  case PV_FENC:   return (char_u *)&(curbuf->b_p_fenc);
  case PV_FF:     return (char_u *)&(curbuf->b_p_ff);
  case PV_FT:     return (char_u *)&(curbuf->b_p_ft);
  case PV_FO:     return (char_u *)&(curbuf->b_p_fo);
  case PV_FLP:    return (char_u *)&(curbuf->b_p_flp);
  case PV_IMI:    return (char_u *)&(curbuf->b_p_iminsert);
  case PV_IMS:    return (char_u *)&(curbuf->b_p_imsearch);
  case PV_INF:    return (char_u *)&(curbuf->b_p_inf);
  case PV_ISK:    return (char_u *)&(curbuf->b_p_isk);
  case PV_INEX:   return (char_u *)&(curbuf->b_p_inex);
  case PV_INDE:   return (char_u *)&(curbuf->b_p_inde);
  case PV_INDK:   return (char_u *)&(curbuf->b_p_indk);
  case PV_FEX:    return (char_u *)&(curbuf->b_p_fex);
  case PV_LISP:   return (char_u *)&(curbuf->b_p_lisp);
  case PV_ML:     return (char_u *)&(curbuf->b_p_ml);
  case PV_MPS:    return (char_u *)&(curbuf->b_p_mps);
  case PV_MA:     return (char_u *)&(curbuf->b_p_ma);
  case PV_MOD:    return (char_u *)&(curbuf->b_changed);
  case PV_NF:     return (char_u *)&(curbuf->b_p_nf);
  case PV_PI:     return (char_u *)&(curbuf->b_p_pi);
  case PV_QE:     return (char_u *)&(curbuf->b_p_qe);
  case PV_RO:     return (char_u *)&(curbuf->b_p_ro);
  case PV_SI:     return (char_u *)&(curbuf->b_p_si);
  case PV_STS:    return (char_u *)&(curbuf->b_p_sts);
  case PV_SUA:    return (char_u *)&(curbuf->b_p_sua);
  case PV_SWF:    return (char_u *)&(curbuf->b_p_swf);
  case PV_SMC:    return (char_u *)&(curbuf->b_p_smc);
  case PV_SYN:    return (char_u *)&(curbuf->b_p_syn);
  case PV_SPC:    return (char_u *)&(curwin->w_s->b_p_spc);
  case PV_SPF:    return (char_u *)&(curwin->w_s->b_p_spf);
  case PV_SPL:    return (char_u *)&(curwin->w_s->b_p_spl);
  case PV_SW:     return (char_u *)&(curbuf->b_p_sw);
  case PV_TS:     return (char_u *)&(curbuf->b_p_ts);
  case PV_TW:     return (char_u *)&(curbuf->b_p_tw);
  case PV_UDF:    return (char_u *)&(curbuf->b_p_udf);
  case PV_WM:     return (char_u *)&(curbuf->b_p_wm);
  case PV_KMAP:   return (char_u *)&(curbuf->b_p_keymap);
  default:        EMSG(_("E356: get_varp ERROR"));
  }
  /* always return a valid pointer to avoid a crash! */
  return (char_u *)&(curbuf->b_p_wm);
}

/*
 * Get the value of 'equalprg', either the buffer-local one or the global one.
 */
char_u *get_equalprg(void)
{
  if (*curbuf->b_p_ep == NUL)
    return p_ep;
  return curbuf->b_p_ep;
}

/*
 * Copy options from one window to another.
 * Used when splitting a window.
 */
void win_copy_options(win_T *wp_from, win_T *wp_to)
{
  copy_winopt(&wp_from->w_onebuf_opt, &wp_to->w_onebuf_opt);
  copy_winopt(&wp_from->w_allbuf_opt, &wp_to->w_allbuf_opt);
  /* Is this right? */
  wp_to->w_farsi = wp_from->w_farsi;
  briopt_check(wp_to);
}

/*
 * Copy the options from one winopt_T to another.
 * Doesn't free the old option values in "to", use clear_winopt() for that.
 * The 'scroll' option is not copied, because it depends on the window height.
 * The 'previewwindow' option is reset, there can be only one preview window.
 */
void copy_winopt(winopt_T *from, winopt_T *to)
{
  to->wo_arab = from->wo_arab;
  to->wo_list = from->wo_list;
  to->wo_nu = from->wo_nu;
  to->wo_rnu = from->wo_rnu;
  to->wo_nuw = from->wo_nuw;
  to->wo_rl = from->wo_rl;
  to->wo_rlc = vim_strsave(from->wo_rlc);
  to->wo_stl = vim_strsave(from->wo_stl);
  to->wo_wrap = from->wo_wrap;
  to->wo_wrap_save = from->wo_wrap_save;
  to->wo_lbr = from->wo_lbr;
  to->wo_bri = from->wo_bri;
  to->wo_briopt = vim_strsave(from->wo_briopt);
  to->wo_scb = from->wo_scb;
  to->wo_scb_save = from->wo_scb_save;
  to->wo_crb = from->wo_crb;
  to->wo_crb_save = from->wo_crb_save;
  to->wo_spell = from->wo_spell;
  to->wo_cuc = from->wo_cuc;
  to->wo_cul = from->wo_cul;
  to->wo_cc = vim_strsave(from->wo_cc);
  to->wo_diff = from->wo_diff;
  to->wo_diff_saved = from->wo_diff_saved;
  to->wo_cocu = vim_strsave(from->wo_cocu);
  to->wo_cole = from->wo_cole;
  to->wo_fdc = from->wo_fdc;
  to->wo_fdc_save = from->wo_fdc_save;
  to->wo_fen = from->wo_fen;
  to->wo_fen_save = from->wo_fen_save;
  to->wo_fdi = vim_strsave(from->wo_fdi);
  to->wo_fml = from->wo_fml;
  to->wo_fdl = from->wo_fdl;
  to->wo_fdl_save = from->wo_fdl_save;
  to->wo_fdm = vim_strsave(from->wo_fdm);
  /* only copy the saved 'foldmethod' when 'diff' was saved too */
  to->wo_fdm_save = from->wo_diff_saved
                    ? vim_strsave(from->wo_fdm_save) : empty_option;
  to->wo_fdn = from->wo_fdn;
  to->wo_fde = vim_strsave(from->wo_fde);
  to->wo_fdt = vim_strsave(from->wo_fdt);
  to->wo_fmr = vim_strsave(from->wo_fmr);
  check_winopt(to);             /* don't want NULL pointers */
}

/*
 * Check string options in a window for a NULL value.
 */
void check_win_options(win_T *win)
{
  check_winopt(&win->w_onebuf_opt);
  check_winopt(&win->w_allbuf_opt);
}

/*
 * Check for NULL pointers in a winopt_T and replace them with empty_option.
 */
static void check_winopt(winopt_T *wop)
{
  check_string_option(&wop->wo_fdi);
  check_string_option(&wop->wo_fdm);
  check_string_option(&wop->wo_fdm_save);
  check_string_option(&wop->wo_fde);
  check_string_option(&wop->wo_fdt);
  check_string_option(&wop->wo_fmr);
  check_string_option(&wop->wo_rlc);
  check_string_option(&wop->wo_stl);
  check_string_option(&wop->wo_cc);
  check_string_option(&wop->wo_cocu);
  check_string_option(&wop->wo_briopt);
}

/*
 * Free the allocated memory inside a winopt_T.
 */
void clear_winopt(winopt_T *wop)
{
  clear_string_option(&wop->wo_fdi);
  clear_string_option(&wop->wo_fdm);
  clear_string_option(&wop->wo_fdm_save);
  clear_string_option(&wop->wo_fde);
  clear_string_option(&wop->wo_fdt);
  clear_string_option(&wop->wo_fmr);
  clear_string_option(&wop->wo_rlc);
  clear_string_option(&wop->wo_stl);
  clear_string_option(&wop->wo_cc);
  clear_string_option(&wop->wo_cocu);
  clear_string_option(&wop->wo_briopt);
}

/*
 * Copy global option values to local options for one buffer.
 * Used when creating a new buffer and sometimes when entering a buffer.
 * flags:
 * BCO_ENTER	We will enter the buf buffer.
 * BCO_ALWAYS	Always copy the options, but only set b_p_initialized when
 *		appropriate.
 * BCO_NOHELP	Don't copy the values to a help buffer.
 */
void buf_copy_options(buf_T *buf, int flags)
{
  int should_copy = TRUE;
  char_u *save_p_isk = NULL;            /* init for GCC */
  int dont_do_help;
  int did_isk = FALSE;

  /*
   * Don't do anything if the buffer is invalid.
   */
  if (buf == NULL || !buf_valid(buf))
    return;

  /*
   * Skip this when the option defaults have not been set yet.  Happens when
   * main() allocates the first buffer.
   */
  if (p_cpo != NULL) {
    /*
     * Always copy when entering and 'cpo' contains 'S'.
     * Don't copy when already initialized.
     * Don't copy when 'cpo' contains 's' and not entering.
     *    'S'      BCO_ENTER  initialized   's'  should_copy
     *     yes       yes          X          X      TRUE
     *     yes       no          yes         X      FALSE
     *     no         X          yes         X      FALSE
     *      X        no          no         yes     FALSE
     *      X        no          no         no      TRUE
     *     no        yes         no          X      TRUE
     */
    if ((vim_strchr(p_cpo, CPO_BUFOPTGLOB) == NULL || !(flags & BCO_ENTER))
        && (buf->b_p_initialized
            || (!(flags & BCO_ENTER)
                && vim_strchr(p_cpo, CPO_BUFOPT) != NULL)))
      should_copy = FALSE;

    if (should_copy || (flags & BCO_ALWAYS)) {
      /* Don't copy the options specific to a help buffer when
       * BCO_NOHELP is given or the options were initialized already
       * (jumping back to a help file with CTRL-T or CTRL-O) */
      dont_do_help = ((flags & BCO_NOHELP) && buf->b_help)
                     || buf->b_p_initialized;
      if (dont_do_help) {               /* don't free b_p_isk */
        save_p_isk = buf->b_p_isk;
        buf->b_p_isk = NULL;
      }
      /*
       * Always free the allocated strings.
       * If not already initialized, set 'readonly' and copy 'fileformat'.
       */
      if (!buf->b_p_initialized) {
        free_buf_options(buf, TRUE);
        buf->b_p_ro = FALSE;                    /* don't copy readonly */
        buf->b_p_fenc = vim_strsave(p_fenc);
        buf->b_p_ff = vim_strsave(p_ff);
        buf->b_p_bh = empty_option;
        buf->b_p_bt = empty_option;
      } else
        free_buf_options(buf, FALSE);

      buf->b_p_ai = p_ai;
      buf->b_p_ai_nopaste = p_ai_nopaste;
      buf->b_p_sw = p_sw;
      buf->b_p_tw = p_tw;
      buf->b_p_tw_nopaste = p_tw_nopaste;
      buf->b_p_tw_nobin = p_tw_nobin;
      buf->b_p_wm = p_wm;
      buf->b_p_wm_nopaste = p_wm_nopaste;
      buf->b_p_wm_nobin = p_wm_nobin;
      buf->b_p_bin = p_bin;
      buf->b_p_bomb = p_bomb;
      buf->b_p_et = p_et;
      buf->b_p_fixeol = p_fixeol;
      buf->b_p_et_nobin = p_et_nobin;
      buf->b_p_et_nopaste = p_et_nopaste;
      buf->b_p_ml = p_ml;
      buf->b_p_ml_nobin = p_ml_nobin;
      buf->b_p_inf = p_inf;
      buf->b_p_swf = p_swf;
      buf->b_p_cpt = vim_strsave(p_cpt);
      buf->b_p_cfu = vim_strsave(p_cfu);
      buf->b_p_ofu = vim_strsave(p_ofu);
      buf->b_p_sts = p_sts;
      buf->b_p_sts_nopaste = p_sts_nopaste;
      buf->b_p_com = vim_strsave(p_com);
      buf->b_p_cms = vim_strsave(p_cms);
      buf->b_p_fo = vim_strsave(p_fo);
      buf->b_p_flp = vim_strsave(p_flp);
      buf->b_p_nf = vim_strsave(p_nf);
      buf->b_p_mps = vim_strsave(p_mps);
      buf->b_p_si = p_si;
      buf->b_p_ci = p_ci;
      buf->b_p_cin = p_cin;
      buf->b_p_cink = vim_strsave(p_cink);
      buf->b_p_cino = vim_strsave(p_cino);
      /* Don't copy 'filetype', it must be detected */
      buf->b_p_ft = empty_option;
      buf->b_p_pi = p_pi;
      buf->b_p_cinw = vim_strsave(p_cinw);
      buf->b_p_lisp = p_lisp;
      /* Don't copy 'syntax', it must be set */
      buf->b_p_syn = empty_option;
      buf->b_p_smc = p_smc;
      buf->b_s.b_syn_isk = empty_option;
      buf->b_s.b_p_spc = vim_strsave(p_spc);
      (void)compile_cap_prog(&buf->b_s);
      buf->b_s.b_p_spf = vim_strsave(p_spf);
      buf->b_s.b_p_spl = vim_strsave(p_spl);
      buf->b_p_inde = vim_strsave(p_inde);
      buf->b_p_indk = vim_strsave(p_indk);
      buf->b_p_fex = vim_strsave(p_fex);
      buf->b_p_sua = vim_strsave(p_sua);
      buf->b_p_keymap = vim_strsave(p_keymap);
      buf->b_kmap_state |= KEYMAP_INIT;
      /* This isn't really an option, but copying the langmap and IME
       * state from the current buffer is better than resetting it. */
      buf->b_p_iminsert = p_iminsert;
      buf->b_p_imsearch = p_imsearch;

      /* options that are normally global but also have a local value
       * are not copied, start using the global value */
      buf->b_p_ar = -1;
      buf->b_p_ul = NO_LOCAL_UNDOLEVEL;
      buf->b_p_bkc = empty_option;
      buf->b_bkc_flags = 0;
      buf->b_p_gp = empty_option;
      buf->b_p_mp = empty_option;
      buf->b_p_efm = empty_option;
      buf->b_p_ep = empty_option;
      buf->b_p_kp = empty_option;
      buf->b_p_path = empty_option;
      buf->b_p_tags = empty_option;
      buf->b_p_tc = empty_option;
      buf->b_tc_flags = 0;
      buf->b_p_def = empty_option;
      buf->b_p_inc = empty_option;
      buf->b_p_inex = vim_strsave(p_inex);
      buf->b_p_dict = empty_option;
      buf->b_p_tsr = empty_option;
      buf->b_p_qe = vim_strsave(p_qe);
      buf->b_p_udf = p_udf;
      buf->b_p_lw = empty_option;

      /*
       * Don't copy the options set by ex_help(), use the saved values,
       * when going from a help buffer to a non-help buffer.
       * Don't touch these at all when BCO_NOHELP is used and going from
       * or to a help buffer.
       */
      if (dont_do_help)
        buf->b_p_isk = save_p_isk;
      else {
        buf->b_p_isk = vim_strsave(p_isk);
        did_isk = true;
        buf->b_p_ts = p_ts;
        buf->b_help = false;
        if (buf->b_p_bt[0] == 'h')
          clear_string_option(&buf->b_p_bt);
        buf->b_p_ma = p_ma;
      }
    }

    /*
     * When the options should be copied (ignoring BCO_ALWAYS), set the
     * flag that indicates that the options have been initialized.
     */
    if (should_copy)
      buf->b_p_initialized = true;
  }

  check_buf_options(buf);           /* make sure we don't have NULLs */
  if (did_isk)
    (void)buf_init_chartab(buf, FALSE);
}

/*
 * Reset the 'modifiable' option and its default value.
 */
void reset_modifiable(void)
{
  int opt_idx;

  curbuf->b_p_ma = FALSE;
  p_ma = FALSE;
  opt_idx = findoption((char_u *)"ma");
  if (opt_idx >= 0)
    options[opt_idx].def_val[VI_DEFAULT] = FALSE;
}

/*
 * Set the global value for 'iminsert' to the local value.
 */
void set_iminsert_global(void)
{
  p_iminsert = curbuf->b_p_iminsert;
}

/*
 * Set the global value for 'imsearch' to the local value.
 */
void set_imsearch_global(void)
{
  p_imsearch = curbuf->b_p_imsearch;
}

/* State shared between set_context_in_set_cmd() and ExpandOldSetting(). */
static int expand_option_idx = -1;
static char_u expand_option_name[5] = {'t', '_', NUL, NUL, NUL};
static int expand_option_flags = 0;

/*
 * Prepare command-line completion for a ":set" command: decide what kind of
 * expansion applies ("xp->xp_context") and where the pattern starts.
 */
void set_context_in_set_cmd(
    expand_T *xp,
    char_u *arg,
    int opt_flags                   /* OPT_GLOBAL and/or OPT_LOCAL */
)
{
  char_u nextchar;
  uint32_t flags = 0;           /* init for GCC */
  int opt_idx = 0;              /* init for GCC */
  char_u *p;
  char_u *s;
  int is_term_option = FALSE;
  int key;

  expand_option_flags = opt_flags;

  xp->xp_context = EXPAND_SETTINGS;
  if (*arg == NUL) {
    xp->xp_pattern = arg;
    return;
  }
  p = arg + STRLEN(arg) - 1;
  if (*p == ' ' && *(p - 1) != '\\') {
    xp->xp_pattern = p + 1;
    return;
  }
  /* Scan backwards for the start of the last ":set" argument. */
  while (p > arg) {
    s = p;
    /* count number of backslashes before ' ' or ',' */
    if (*p == ' ' || *p == ',') {
      while (s > arg && *(s - 1) == '\\')
        --s;
    }
    /* break at a space with an even number of backslashes */
    if (*p == ' ' && ((p - s) & 1) == 0) {
      ++p;
      break;
    }
    --p;
  }
  if (STRNCMP(p, "no", 2) == 0) {
    xp->xp_context = EXPAND_BOOL_SETTINGS;
    p += 2;
  }
  if (STRNCMP(p, "inv", 3) == 0) {
    xp->xp_context = EXPAND_BOOL_SETTINGS;
    p += 3;
  }
  xp->xp_pattern = arg = p;
  if (*arg == '<') {
    /* "<key>" form of a terminal option */
    while (*p != '>')
      if (*p++ == NUL)              /* expand terminal option name */
        return;
    key = get_special_key_code(arg + 1);
    if (key == 0) {                 /* unknown name */
      xp->xp_context = EXPAND_NOTHING;
      return;
    }
    nextchar = *++p;
    is_term_option = TRUE;
    expand_option_name[2] = (char_u)KEY2TERMCAP0(key);
    expand_option_name[3] = KEY2TERMCAP1(key);
  } else {
    if (p[0] == 't' && p[1] == '_') {
      /* "t_xx" form of a terminal option */
      p += 2;
      if (*p != NUL)
        ++p;
      if (*p == NUL)
        return;                 /* expand option name */
      nextchar = *++p;
      is_term_option = TRUE;
      expand_option_name[2] = p[-2];
      expand_option_name[3] = p[-1];
    } else {
      /* Allow * wildcard */
      while (ASCII_ISALNUM(*p) || *p == '_' || *p == '*')
        p++;
      if (*p == NUL)
        return;
      nextchar = *p;
      /* temporarily terminate the name to look it up */
      *p = NUL;
      opt_idx = findoption(arg);
      *p = nextchar;
      if (opt_idx == -1 || options[opt_idx].var == NULL) {
        xp->xp_context = EXPAND_NOTHING;
        return;
      }
      flags = options[opt_idx].flags;
      if (flags & P_BOOL) {
        xp->xp_context = EXPAND_NOTHING;
        return;
      }
    }
  }
  /* handle "-=" and "+=" */
  if ((nextchar == '-' || nextchar == '+' || nextchar == '^') && p[1] == '=') {
    ++p;
    nextchar = '=';
  }
  if ((nextchar != '=' && nextchar != ':')
      || xp->xp_context == EXPAND_BOOL_SETTINGS) {
    xp->xp_context = EXPAND_UNSUCCESSFUL;
    return;
  }
  if (xp->xp_context != EXPAND_BOOL_SETTINGS && p[1] == NUL) {
    /* expanding "set opt=" into the current value */
    xp->xp_context = EXPAND_OLD_SETTING;
    if (is_term_option)
      expand_option_idx = -1;
    else
      expand_option_idx = opt_idx;
    xp->xp_pattern = p + 1;
    return;
  }
  xp->xp_context = EXPAND_NOTHING;
  if (is_term_option || (flags & P_NUM))
    return;

  xp->xp_pattern = p + 1;

  if (flags & P_EXPAND) {
    p = options[opt_idx].var;
    if (p == (char_u *)&p_bdir
        || p == (char_u *)&p_dir
        || p == (char_u *)&p_path
        || p == (char_u *)&p_pp
        || p == (char_u *)&p_rtp
        || p == (char_u *)&p_cdpath
        || p == (char_u *)&p_vdir
        ) {
      xp->xp_context = EXPAND_DIRECTORIES;
      if (p == (char_u *)&p_path
          || p == (char_u *)&p_cdpath
          )
        xp->xp_backslash = XP_BS_THREE;
      else
        xp->xp_backslash = XP_BS_ONE;
    } else {
      xp->xp_context = EXPAND_FILES;
      /* for 'tags' need three backslashes for a space */
      if (p == (char_u *)&p_tags)
        xp->xp_backslash = XP_BS_THREE;
      else
        xp->xp_backslash = XP_BS_ONE;
    }
  }

  /* For an option that is a list of file names, find the start of the
   * last file name. */
  for (p = arg + STRLEN(arg) - 1; p > xp->xp_pattern; --p) {
    /* count number of backslashes before ' ' or ',' */
    if (*p == ' ' || *p == ',') {
      s = p;
      while (s > xp->xp_pattern && *(s - 1) == '\\')
        --s;
      if ((*p == ' ' && (xp->xp_backslash == XP_BS_THREE && (p - s) < 3))
          || (*p == ',' && (flags & P_COMMA) && ((p - s) & 1) == 0)) {
        xp->xp_pattern = p + 1;
        break;
      }
    }

    /* for 'spellsuggest' start at "file:" */
    if (options[opt_idx].var == (char_u *)&p_sps
        && STRNCMP(p, "file:", 5) == 0) {
      xp->xp_pattern = p + 5;
      break;
    }
  }

  return;
}

/*
 * Collect option names matching "regmatch" for ":set" completion.
 * Allocates "*file"; "*num_file" is set to the number of matches.
 */
int ExpandSettings(expand_T *xp, regmatch_T *regmatch, int *num_file, char_u ***file)
{
  int num_normal = 0;               // Nr of matching non-term-code settings
  int match;
  int count = 0;
  char_u *str;
  int loop;
  static char *(names[]) = {"all", "termcap"};
  int ic = regmatch->rm_ic;             /* remember the ignore-case flag */

  /* do this loop twice:
   * loop == 0: count the number of matching options
   * loop == 1: copy the matching options into allocated memory
   */
  for (loop = 0; loop <= 1; ++loop) {
    regmatch->rm_ic = ic;
    if (xp->xp_context != EXPAND_BOOL_SETTINGS) {
      for (match = 0; match < (int)ARRAY_SIZE(names); ++match)
        if (vim_regexec(regmatch, (char_u *)names[match], (colnr_T)0)) {
          if (loop == 0)
            num_normal++;
          else
            (*file)[count++] = vim_strsave((char_u *)names[match]);
        }
    }
    for (size_t opt_idx = 0;
         (str = (char_u *)options[opt_idx].fullname) != NULL;
         opt_idx++) {
      if (options[opt_idx].var == NULL)
        continue;
      if (xp->xp_context == EXPAND_BOOL_SETTINGS
          && !(options[opt_idx].flags & P_BOOL))
        continue;
      match = FALSE;
      if (vim_regexec(regmatch, str, (colnr_T)0)
          || (options[opt_idx].shortname != NULL
              && vim_regexec(regmatch,
                             (char_u *)options[opt_idx].shortname,
                             (colnr_T)0))) {
        match = TRUE;
      }

      if (match) {
        if (loop == 0) {
          num_normal++;
        } else
          (*file)[count++] = vim_strsave(str);
      }
    }

    if (loop == 0) {
      if (num_normal > 0) {
        *num_file = num_normal;
      } else {
        /* nothing matched: leave "*file" unallocated */
        return OK;
      }
      *file = (char_u **)xmalloc((size_t)(*num_file) * sizeof(char_u *));
    }
  }
  return OK;
}

/*
 * Expand "set opt=" into the old value of the option.  Result is one
 * allocated string in "*file".
 */
void ExpandOldSetting(int *num_file, char_u ***file)
{
  char_u *var = NULL;

  *num_file = 0;
  *file = (char_u **)xmalloc(sizeof(char_u *));

  /*
   * For a terminal key code expand_option_idx is < 0.
   */
  if (expand_option_idx < 0) {
    expand_option_idx = findoption(expand_option_name);
  }

  if (expand_option_idx >= 0) {
    /* put string of option value in NameBuff */
    option_value2string(&options[expand_option_idx], expand_option_flags);
    var = NameBuff;
  } else if (var == NULL)
    var = (char_u *)"";

  /* A backslash is required before some characters.  This is the reverse of
   * what happens in do_set(). */
  char_u *buf = vim_strsave_escaped(var, escape_chars);

#ifdef BACKSLASH_IN_FILENAME
  /* For MS-Windows et al. we don't double backslashes at the start and
   * before a file name character. */
  for (var = buf; *var != NUL; mb_ptr_adv(var))
    if (var[0] == '\\' && var[1] == '\\'
        && expand_option_idx >= 0
        && (options[expand_option_idx].flags & P_EXPAND)
        && vim_isfilec(var[2])
        && (var[2] != '\\' || (var == buf && var[4] != '\\')))
      STRMOVE(var, var + 1);
#endif

  *file[0] = buf;
  *num_file = 1;
}

/*
 * Get the value for the numeric or string option *opp in a nice format into
 * NameBuff[].  Must not be called with a hidden option!
 */
static void option_value2string(
    vimoption_T *opp,
    int opt_flags                   /* OPT_GLOBAL and/or OPT_LOCAL */
)
{
  char_u *varp;

  varp = get_varp_scope(opp, opt_flags);

  if (opp->flags & P_NUM) {
    long wc = 0;

    if (wc_use_keyname(varp, &wc)) {
      /* 'wildchar' / 'wildcharm': show as a key name */
      STRLCPY(NameBuff, get_special_key_name((int)wc, 0), sizeof(NameBuff));
    } else if (wc != 0) {
      STRLCPY(NameBuff, transchar((int)wc), sizeof(NameBuff));
    } else {
      snprintf((char *)NameBuff, sizeof(NameBuff),
               "%" PRId64, (int64_t)*(long *)varp);
    }
  } else {  // P_STRING
    varp = *(char_u **)(varp);
    if (varp == NULL)                       /* just in case */
      NameBuff[0] = NUL;
    else if (opp->flags & P_EXPAND)
      home_replace(NULL, varp, NameBuff, MAXPATHL, FALSE);
    /* Translate 'pastetoggle' into special key names */
    else if ((char_u **)opp->var == &p_pt)
      str2specialbuf(p_pt, NameBuff, MAXPATHL);
    else
      STRLCPY(NameBuff, varp, MAXPATHL);
  }
}

/*
 * Return TRUE if "varp" points to 'wildchar' or 'wildcharm' and it can be
 * printed as a keyname.
 * "*wcp" is set to the value of the option if it's 'wildchar' or 'wildcharm'.
 */
static int wc_use_keyname(char_u *varp, long *wcp)
{
  if (((long *)varp == &p_wc) || ((long *)varp == &p_wcm)) {
    *wcp = *(long *)varp;
    if (IS_SPECIAL(*wcp) || find_special_key_in_table((int)*wcp) >= 0)
      return TRUE;
  }
  return FALSE;
}

/*
 * Any character has an equivalent 'langmap' character.  This is used for
 * keyboards that have a special language mode that sends characters above
 * 128 (although other characters can be translated too).  The "to" field is a
 * Vim command character.  This avoids having to switch the keyboard back to
 * ASCII mode when leaving Insert mode.
 *
 * langmap_mapchar[] maps any of 256 chars to an ASCII char used for Vim
 * commands.
 * langmap_mapga.ga_data is a sorted table of langmap_entry_T.
 * This does the same as langmap_mapchar[] for characters >= 256.
 */

/*
 * With multi-byte support use growarray for 'langmap' chars >= 256
 */
typedef struct {
  int from;
  int to;
} langmap_entry_T;

/* Sorted (by "from") table of langmap entries for characters >= 256. */
static garray_T langmap_mapga = GA_EMPTY_INIT_VALUE;

/*
 * Search for an entry in "langmap_mapga" for "from".  If found set the "to"
 * field.  If not found insert a new entry at the appropriate location.
 */
static void langmap_set_entry(int from, int to)
{
  langmap_entry_T *entries = (langmap_entry_T *)(langmap_mapga.ga_data);
  unsigned int a = 0;
  assert(langmap_mapga.ga_len >= 0);
  unsigned int b = (unsigned int)langmap_mapga.ga_len;

  /* Do a binary search for an existing entry. */
  while (a != b) {
    unsigned int i = (a + b) / 2;
    int d = entries[i].from - from;

    if (d == 0) {
      entries[i].to = to;
      return;
    }
    if (d < 0)
      a = i + 1;
    else
      b = i;
  }

  ga_grow(&langmap_mapga, 1);

  /* insert new entry at position "a" */
  entries = (langmap_entry_T *)(langmap_mapga.ga_data) + a;
  memmove(entries + 1, entries,
          ((unsigned int)langmap_mapga.ga_len - a) * sizeof(langmap_entry_T));
  ++langmap_mapga.ga_len;
  entries[0].from = from;
  entries[0].to = to;
}

/*
 * Apply 'langmap' to multi-byte character "c" and return the result.
 */
int langmap_adjust_mb(int c)
{
  langmap_entry_T *entries = (langmap_entry_T *)(langmap_mapga.ga_data);
  int a = 0;
  int b = langmap_mapga.ga_len;

  while (a != b) {
    int i = (a + b) / 2;
    int d = entries[i].from - c;

    if (d == 0)
      return entries[i].to;      /* found matching entry */
    if (d < 0)
      a = i + 1;
    else
      b = i;
  }
  return c;    /* no entry found, return "c" unmodified */
}

/*
 * Reset 'langmap' to the identity mapping.
 */
static void langmap_init(void)
{
  for (int i = 0; i < 256; i++)
    langmap_mapchar[i] = (char_u)i;     /* we init with a one-to-one map */
  ga_init(&langmap_mapga, sizeof(langmap_entry_T), 8);
}

/*
 * Called when langmap option is set; the language map can be
 * changed at any time!
 */
static void langmap_set(void)
{
  char_u *p;
  char_u *p2;
  int from, to;

  ga_clear(&langmap_mapga);             /* clear the previous map first */
  langmap_init();                       /* back to one-to-one map */

  for (p = p_langmap; p[0] != NUL; ) {
    /* Find the end of this "from" part: up to ',' or ';', honoring
     * backslash escapes. */
    for (p2 = p; p2[0] != NUL && p2[0] != ',' && p2[0] != ';';
         mb_ptr_adv(p2)) {
      if (p2[0] == '\\' && p2[1] != NUL)
        ++p2;
    }
    if (p2[0] == ';')
      ++p2;                 /* abcd;ABCD form, p2 points to A */
    else
      p2 = NULL;            /* aAbBcCdD form, p2 is NULL */
    while (p[0]) {
      if (p[0] == ',') {
        ++p;
        break;
      }
      if (p[0] == '\\' && p[1] != NUL)
        ++p;
      from = (*mb_ptr2char)(p);
      to = NUL;
      if (p2 == NULL) {
        /* aAbBcCdD form: "to" follows "from" immediately */
        mb_ptr_adv(p);
        if (p[0] != ',') {
          if (p[0] == '\\')
            ++p;
          to = (*mb_ptr2char)(p);
        }
      } else {
        /* abcd;ABCD form: "to" comes from the part after the ';' */
        if (p2[0] != ',') {
          if (p2[0] == '\\')
            ++p2;
          to = (*mb_ptr2char)(p2);
        }
      }
      if (to == NUL) {
        EMSG2(_("E357: 'langmap': Matching character missing for %s"),
              transchar(from));
        return;
      }

      if (from >= 256)
        langmap_set_entry(from, to);
      else {
        assert(to <= UCHAR_MAX);
        langmap_mapchar[from & 255] = (char_u)to;
      }

      /* Advance to next pair */
      mb_ptr_adv(p);
      if (p2 != NULL) {
        mb_ptr_adv(p2);
        if (*p == ';') {
          p = p2;
          if (p[0] != NUL) {
            if (p[0] != ',') {
              EMSG2(_(
                      "E358: 'langmap': Extra characters after semicolon: %s"),
                    p);
              return;
            }
            ++p;
          }
          break;
        }
      }
    }
  }
}

/*
 * Return TRUE if format option 'x' is in effect.
 * Take care of no formatting when 'paste' is set.
 */
int has_format_option(int x)
{
  if (p_paste)
    return FALSE;
  return vim_strchr(curbuf->b_p_fo, x) != NULL;
}

/// @returns true if "x" is present in 'shortmess' option, or
/// 'shortmess' contains 'a' and "x" is present in SHM_ALL_ABBREVIATIONS.
bool shortmess(int x)
{
  return (p_shm != NULL
          && (vim_strchr(p_shm, x) != NULL
              || (vim_strchr(p_shm, 'a') != NULL
                  && vim_strchr((char_u *)SHM_ALL_ABBREVIATIONS, x) != NULL)));
}

/*
 * paste_option_changed() - Called after p_paste was set or reset.
*/
static void paste_option_changed(void)
{
  /* Remember the previous 'paste' state and the option values that were in
   * effect before 'paste' was switched on, so they can be restored. */
  static int old_p_paste = FALSE;
  static int save_sm = 0;
  static int save_sta = 0;
  static int save_ru = 0;
  static int save_ri = 0;
  static int save_hkmap = 0;

  if (p_paste) {
    /*
     * Paste switched from off to on.
     * Save the current values, so they can be restored later.
     */
    if (!old_p_paste) {
      /* save options for each buffer */
      FOR_ALL_BUFFERS(buf) {
        buf->b_p_tw_nopaste = buf->b_p_tw;
        buf->b_p_wm_nopaste = buf->b_p_wm;
        buf->b_p_sts_nopaste = buf->b_p_sts;
        buf->b_p_ai_nopaste = buf->b_p_ai;
        buf->b_p_et_nopaste = buf->b_p_et;
      }

      // save global options
      save_sm = p_sm;
      save_sta = p_sta;
      save_ru = p_ru;
      save_ri = p_ri;
      save_hkmap = p_hkmap;
      // save global values for local buffer options
      p_ai_nopaste = p_ai;
      p_et_nopaste = p_et;
      p_sts_nopaste = p_sts;
      p_tw_nopaste = p_tw;
      p_wm_nopaste = p_wm;
    }

    // Always set the option values, also when 'paste' is set when it is
    // already on.
    // set options for each buffer
    FOR_ALL_BUFFERS(buf) {
      buf->b_p_tw = 0;              // textwidth is 0
      buf->b_p_wm = 0;              // wrapmargin is 0
      buf->b_p_sts = 0;             // softtabstop is 0
      buf->b_p_ai = 0;              // no auto-indent
      buf->b_p_et = 0;              // no expandtab
    }

    // set global options
    p_sm = 0;                       // no showmatch
    p_sta = 0;                      // no smarttab
    if (p_ru) {
      status_redraw_all();          // redraw to remove the ruler
    }
    p_ru = 0;                       // no ruler
    p_ri = 0;                       // no reverse insert
    p_hkmap = 0;                    // no Hebrew keyboard
    // set global values for local buffer options
    p_tw = 0;
    p_wm = 0;
    p_sts = 0;
    p_ai = 0;
  }
  /*
   * Paste switched from on to off: Restore saved values.
   */
  else if (old_p_paste) {
    /* restore options for each buffer */
    FOR_ALL_BUFFERS(buf) {
      buf->b_p_tw = buf->b_p_tw_nopaste;
      buf->b_p_wm = buf->b_p_wm_nopaste;
      buf->b_p_sts = buf->b_p_sts_nopaste;
      buf->b_p_ai = buf->b_p_ai_nopaste;
      buf->b_p_et = buf->b_p_et_nopaste;
    }

    /* restore global options */
    p_sm = save_sm;
    p_sta = save_sta;
    if (p_ru != save_ru) {
      status_redraw_all();          // redraw to draw the ruler
    }
    p_ru = save_ru;
    p_ri = save_ri;
    p_hkmap = save_hkmap;
    // set global values for local buffer options
    p_ai = p_ai_nopaste;
    p_et = p_et_nopaste;
    p_sts = p_sts_nopaste;
    p_tw = p_tw_nopaste;
    p_wm = p_wm_nopaste;
  }

  old_p_paste = p_paste;
}

/// vimrc_found() - Called when a vimrc or "VIMINIT" has been found.
///
/// Set the values for options that didn't get set yet to the Vim defaults.
/// When "fname" is not NULL, use it to set $"envname" when it wasn't set yet.
void vimrc_found(char_u *fname, char_u *envname)
{
  char_u *p;

  if (fname != NULL) {
    p = (char_u *)vim_getenv((char *)envname);
    if (p == NULL) {
      /* Set $MYVIMRC to the first vimrc file found. */
      p = (char_u *)FullName_save((char *)fname, FALSE);
      if (p != NULL) {
        vim_setenv((char *)envname, (char *)p);
        xfree(p);
      }
    } else {
      xfree(p);
    }
  }
}

/*
 * Return TRUE when option "name" has been set.
 * Only works correctly for global options.
 */
int option_was_set(char_u *name)
{
  int idx;

  idx = findoption(name);
  if (idx < 0)          /* unknown option */
    return FALSE;
  if (options[idx].flags & P_WAS_SET)
    return TRUE;
  return FALSE;
}

/*
 * fill_breakat_flags() -- called when 'breakat' changes value.
 * Rebuilds the 256-entry lookup table breakat_flags[].
 */
static void fill_breakat_flags(void)
{
  char_u *p;
  int i;

  for (i = 0; i < 256; i++)
    breakat_flags[i] = FALSE;

  if (p_breakat != NULL)
    for (p = p_breakat; *p; p++)
      breakat_flags[*p] = TRUE;
}

/*
 * Check an option that can be a range of string values.
 *
 * Return OK for correct value, FAIL otherwise.
 * Empty is always OK.
*/
static int check_opt_strings(
    char_u *val,
    char **values,
    int list                    /* when TRUE: accept a list of values */
    )
{
  return opt_strings_flags(val, values, NULL, list);
}

/*
 * Handle an option that can be a range of string values.
 * Set a flag in "*flagp" for each string present.
 *
 * Return OK for correct value, FAIL otherwise.
 * Empty is always OK.
 */
static int opt_strings_flags(
    char_u *val,                /* new value */
    char **values,              /* array of valid string values */
    unsigned *flagp,
    bool list                   /* when TRUE: accept a list of values */
    )
{
  unsigned int new_flags = 0;

  while (*val) {
    for (unsigned int i = 0;; ++i) {
      if (values[i] == NULL)            /* val not found in values[] */
        return FAIL;

      size_t len = STRLEN(values[i]);
      if (STRNCMP(values[i], val, len) == 0
          && ((list && val[len] == ',') || val[len] == NUL)) {
        val += len + (val[len] == ',');
        /* flag bit i must fit into "new_flags" */
        assert(i < sizeof(1U) * 8);
        new_flags |= (1U << i);
        break;                  /* check next item in val list */
      }
    }
  }
  /* Only modify the output when the whole value parsed successfully. */
  if (flagp != NULL)
    *flagp = new_flags;

  return OK;
}

/*
 * Read the 'wildmode' option, fill wim_flags[].
 */
static int check_opt_wim(void)
{
  char_u new_wim_flags[4];
  char_u *p;
  int i;
  int idx = 0;

  for (i = 0; i < 4; ++i)
    new_wim_flags[i] = 0;

  for (p = p_wim; *p; ++p) {
    /* find the length of the next alphabetic word */
    for (i = 0; ASCII_ISALPHA(p[i]); ++i)
      ;
    if (p[i] != NUL && p[i] != ',' && p[i] != ':')
      return FAIL;
    if (i == 7 && STRNCMP(p, "longest", 7) == 0)
      new_wim_flags[idx] |= WIM_LONGEST;
    else if (i == 4 && STRNCMP(p, "full", 4) == 0)
      new_wim_flags[idx] |= WIM_FULL;
    else if (i == 4 && STRNCMP(p, "list", 4) == 0)
      new_wim_flags[idx] |= WIM_LIST;
    else
      return FAIL;
    p += i;
    if (*p == NUL)
      break;
    if (*p == ',') {
      if (idx == 3)             /* at most 4 comma separated parts */
        return FAIL;
      ++idx;
    }
  }

  /* fill remaining entries with last flag */
  while (idx < 3) {
    new_wim_flags[idx + 1] = new_wim_flags[idx];
    ++idx;
  }

  /* only when there are no errors, wim_flags[] is changed */
  for (i = 0; i < 4; ++i)
    wim_flags[i] = new_wim_flags[i];
  return OK;
}

/*
 * Check if backspacing over something is allowed.
 * The parameter "what" is one of the following: BS_INDENT, BS_EOL
 * or BS_START
 */
bool can_bs(int what)
{
  switch (*p_bs) {
  case '2':
    return TRUE;                /* "2": backspace over everything */
  case '1':
    return what != BS_START;    /* "1": not over the start of insert */
  case '0':
    return FALSE;               /* "0": nothing */
  }
  /* otherwise 'backspace' is a list of flag characters */
  return vim_strchr(p_bs, what) != NULL;
}

/*
 * Save the current values of 'fileformat' and 'fileencoding', so that we know
 * the file must be considered changed when the value is different.
 */
void save_file_ff(buf_T *buf)
{
  buf->b_start_ffc = *buf->b_p_ff;
  buf->b_start_eol = buf->b_p_eol;
  buf->b_start_bomb = buf->b_p_bomb;

  /* Only use free/alloc when necessary, they take time. */
  if (buf->b_start_fenc == NULL
      || STRCMP(buf->b_start_fenc, buf->b_p_fenc) != 0) {
    xfree(buf->b_start_fenc);
    buf->b_start_fenc = vim_strsave(buf->b_p_fenc);
  }
}

/*
 * Return TRUE if 'fileformat' and/or 'fileencoding' has a different value
 * from when editing started (save_file_ff() called).
 * Also when 'endofline' was changed and 'binary' is set, or when 'bomb' was
 * changed and 'binary' is not set.
 * Also when 'endofline' was changed and 'fixeol' is not set.
 * When "ignore_empty" is true don't consider a new, empty buffer to be
 * changed.
 */
bool file_ff_differs(buf_T *buf, bool ignore_empty)
{
  /* In a buffer that was never loaded the options are not valid. */
  if (buf->b_flags & BF_NEVERLOADED)
    return FALSE;
  if (ignore_empty
      && (buf->b_flags & BF_NEW)
      && buf->b_ml.ml_line_count == 1
      && *ml_get_buf(buf, (linenr_T)1, FALSE) == NUL)
    return FALSE;
  if (buf->b_start_ffc != *buf->b_p_ff)
    return true;
  if ((buf->b_p_bin || !buf->b_p_fixeol) && buf->b_start_eol != buf->b_p_eol)
    return true;
  if (!buf->b_p_bin && buf->b_start_bomb != buf->b_p_bomb)
    return TRUE;
  if (buf->b_start_fenc == NULL)
    return *buf->b_p_fenc != NUL;
  return STRCMP(buf->b_start_fenc, buf->b_p_fenc) != 0;
}

/*
 * return OK if "p" is a valid fileformat name, FAIL otherwise.
*/
int check_ff_value(char_u *p)
{
  return check_opt_strings(p, p_ff_values, FALSE);
}

/*
 * Return the effective shiftwidth value for current buffer, using the
 * 'tabstop' value when 'shiftwidth' is zero.
 */
int get_sw_value(buf_T *buf)
{
  long result = buf->b_p_sw ? buf->b_p_sw : buf->b_p_ts;
  assert(result >= 0 && result <= INT_MAX);
  return (int)result;
}

// Return the effective softtabstop value for the current buffer,
// using the effective shiftwidth value when 'softtabstop' is negative.
int get_sts_value(void)
{
  long result = curbuf->b_p_sts < 0 ? get_sw_value(curbuf) : curbuf->b_p_sts;
  assert(result >= 0 && result <= INT_MAX);
  return (int)result;
}

/*
 * Check matchpairs option for "*initc".
 * If there is a match set "*initc" to the matching character and "*findc" to
 * the opposite character.  Set "*backwards" to the direction.
 * When "switchit" is TRUE swap the direction.
 */
void find_mps_values(int *initc, int *findc, int *backwards, int switchit)
{
  char_u *ptr;

  /* 'matchpairs' is a list of "x:y" pairs separated by commas. */
  ptr = curbuf->b_p_mps;
  while (*ptr != NUL) {
    if (has_mbyte) {
      char_u *prev;

      /* multi-byte: pair is "<char>:<char>", chars may be multi-byte */
      if (mb_ptr2char(ptr) == *initc) {
        if (switchit) {
          *findc = *initc;
          *initc = mb_ptr2char(ptr + mb_ptr2len(ptr) + 1);
          *backwards = TRUE;
        } else {
          *findc = mb_ptr2char(ptr + mb_ptr2len(ptr) + 1);
          *backwards = FALSE;
        }
        return;
      }
      prev = ptr;
      ptr += mb_ptr2len(ptr) + 1;       /* skip over "x:" */
      if (mb_ptr2char(ptr) == *initc) {
        if (switchit) {
          *findc = *initc;
          *initc = mb_ptr2char(prev);
          *backwards = FALSE;
        } else {
          *findc = mb_ptr2char(prev);
          *backwards = TRUE;
        }
        return;
      }
      ptr += mb_ptr2len(ptr);
    } else {
      /* single byte: pair is exactly "x:y" */
      if (*ptr == *initc) {
        if (switchit) {
          *backwards = TRUE;
          *findc = *initc;
          *initc = ptr[2];
        } else {
          *backwards = FALSE;
          *findc = ptr[2];
        }
        return;
      }
      ptr += 2;
      if (*ptr == *initc) {
        if (switchit) {
          *backwards = FALSE;
          *findc = *initc;
          *initc = ptr[-2];
        } else {
          *backwards = TRUE;
          *findc = ptr[-2];
        }
        return;
      }
      ++ptr;
    }
    if (*ptr == ',')
      ++ptr;
  }
}

/// This is called when 'breakindentopt' is changed and when a window is
/// initialized
static bool briopt_check(win_T *wp)
{
  int bri_shift = 0;
  int bri_min = 20;
  bool bri_sbr = false;

  char_u *p = wp->w_p_briopt;
  while (*p != NUL)
  {
    if (STRNCMP(p, "shift:", 6) == 0
        && ((p[6] == '-' && ascii_isdigit(p[7])) || ascii_isdigit(p[6])))
    {
      p += 6;
      bri_shift = getdigits_int(&p);
    }
    else if (STRNCMP(p, "min:", 4) == 0 && ascii_isdigit(p[4]))
    {
      p += 4;
      bri_min = getdigits_int(&p);
    }
    else if (STRNCMP(p, "sbr", 3) == 0)
    {
      p += 3;
      bri_sbr = true;
    }
    /* each item must be followed by ',' or the end of the string */
    if (*p != ',' && *p != NUL)
      return false;
    if (*p == ',')
      ++p;
  }

  /* only store the parsed values when the whole option was valid */
  wp->w_p_brishift = bri_shift;
  wp->w_p_brimin = bri_min;
  wp->w_p_brisbr = bri_sbr;

  return true;
}

/// Get the local or global value of 'backupcopy'.
///
/// @param buf The buffer.
unsigned int get_bkc_value(buf_T *buf)
{
  return buf->b_bkc_flags ? buf->b_bkc_flags : bkc_flags;
}

/// Return the current end-of-line type: EOL_DOS, EOL_UNIX or EOL_MAC.
int get_fileformat(buf_T *buf)
{
  int c = *buf->b_p_ff;

  if (buf->b_p_bin || c == 'u') {
    return EOL_UNIX;
  }
  if (c == 'm') {
    return EOL_MAC;
  }
  return EOL_DOS;
}

/// Like get_fileformat(), but override 'fileformat' with "p" for "++opt=val"
/// argument.
///
/// @param eap can be NULL!
int get_fileformat_force(buf_T *buf, exarg_T *eap)
{
  int c;

  if (eap != NULL && eap->force_ff != 0) {
    c = eap->cmd[eap->force_ff];
  } else {
    if ((eap != NULL && eap->force_bin != 0)
        ? (eap->force_bin == FORCE_BIN) : buf->b_p_bin) {
      return EOL_UNIX;
    }
    c = *buf->b_p_ff;
  }
  if (c == 'u') {
    return EOL_UNIX;
  }
  if (c == 'm') {
    return EOL_MAC;
  }
  return EOL_DOS;
}

/// Return the default fileformat from 'fileformats'.
int default_fileformat(void)
{
  switch (*p_ffs) {
  case 'm':
    return EOL_MAC;
  case 'd':
    return EOL_DOS;
  }
  return EOL_UNIX;
}

/// Set the current end-of-line type to EOL_UNIX, EOL_MAC, or EOL_DOS.
///
/// Sets 'fileformat'.
///
/// @param eol_style End-of-line style.
/// @param opt_flags OPT_LOCAL and/or OPT_GLOBAL void set_fileformat(int eol_style, int opt_flags) { char *p = NULL; switch (eol_style) { case EOL_UNIX: p = FF_UNIX; break; case EOL_MAC: p = FF_MAC; break; case EOL_DOS: p = FF_DOS; break; } // p is NULL if "eol_style" is EOL_UNKNOWN. if (p != NULL) { set_string_option_direct((char_u *)"ff", -1, (char_u *)p, OPT_FREE | opt_flags, 0); } // This may cause the buffer to become (un)modified. check_status(curbuf); redraw_tabline = true; need_maketitle = true; // Set window title later. } /// Skip to next part of an option argument: Skip space and comma. char_u *skip_to_option_part(char_u *p) { if (*p == ',') { p++; } while (*p == ' ') { p++; } return p; } /// Isolate one part of a string option separated by `sep_chars`. /// /// @param[in,out] option advanced to the next part /// @param[in,out] buf copy of the isolated part /// @param[in] maxlen length of `buf` /// @param[in] sep_chars chars that separate the option parts /// /// @return length of `*option` size_t copy_option_part(char_u **option, char_u *buf, size_t maxlen, char *sep_chars) { size_t len = 0; char_u *p = *option; // skip '.' at start of option part, for 'suffixes' if (*p == '.') { buf[len++] = *p++; } while (*p != NUL && vim_strchr((char_u *)sep_chars, *p) == NULL) { // Skip backslash before a separator character and space. if (p[0] == '\\' && vim_strchr((char_u *)sep_chars, p[1]) != NULL) { p++; } if (len < maxlen - 1) { buf[len++] = *p; } p++; } buf[len] = NUL; if (*p != NUL && *p != ',') { // skip non-standard separator p++; } p = skip_to_option_part(p); // p points to next file name *option = p; return len; } /// Return TRUE when 'shell' has "csh" in the tail. int csh_like_shell(void) { return strstr((char *)path_tail(p_sh), "csh") != NULL; }
./CrossVul/dataset_final_sorted/CWE-20/c/good_4899_0
crossvul-cpp_data_bad_5533_0
/* diskstore.c implements a very simple disk backed key-value store used * by Redis for the "disk" backend. This implementation uses the filesystem * to store key/value pairs. Every file represents a given key. * * The key path is calculated using the SHA1 of the key name. For instance * the key "foo" is stored as a file name called: * * /0b/ee/0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 * * The couples of characters from the hex output of SHA1 are also used * to locate two two levels of directories to store the file (as most * filesystems are not able to handle too many files in a single dir). * * In the end there are 65536 final directories (256 directories inside * every 256 top level directories), so that with 1 billion of files every * directory will contain in the average 15258 entires, that is ok with * most filesystems implementation. * * The actaul implementation of this disk store is highly related to the * filesystem implementation. This implementation may be replaced by * a B+TREE implementation in future implementations. * * Data ok every key is serialized using the same format used for .rdb * serialization. Everything is serialized on every entry: key name, * ttl information in case of keys with an associated expire time, and the * serialized value itself. * * Because the format is the same of the .rdb files it is trivial to create * an .rdb file starting from this format just by mean of scanning the * directories and concatenating entries, with the sole addition of an * .rdb header at the start and the end-of-db opcode at the end. * * ------------------------------------------------------------------------- * * Copyright (c) 2010-2011, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "redis.h" #include <fcntl.h> #include <sys/stat.h> int dsOpen(void) { struct stat sb; int retval; char *path = server.diskstore_path; if ((retval = stat(path,&sb) == -1) && errno != ENOENT) { redisLog(REDIS_WARNING, "Error opening disk store at %s: %s", path, strerror(errno)); return REDIS_ERR; } /* Directory already in place. Assume everything is ok. 
*/ if (retval == 0 && S_ISDIR(sb.st_mode)) return REDIS_OK; /* File exists but it's not a directory */ if (retval == 0 && !S_ISDIR(sb.st_mode)) { redisLog(REDIS_WARNING,"Disk store at %s is not a directory", path); return REDIS_ERR; } /* New disk store, create the directory structure now, as creating * them in a lazy way is not a good idea, after very few insertions * we'll need most of the 65536 directories anyway. */ if (mkdir(path) == -1) { redisLog(REDIS_WARNING,"Disk store init failed creating dir %s: %s", path, strerror(errno)); return REDIS_ERR; } return REDIS_OK; } int dsClose(void) { return REDIS_OK; } int dsSet(redisDb *db, robj *key, robj *val) { } robj *dsGet(redisDb *db, robj *key) { } int dsExists(redisDb *db, robj *key) { }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5533_0
crossvul-cpp_data_bad_3442_0
/* BNEP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2001-2002 Inventel Systemes Written 2001-2002 by David Libault <david.libault@inventel.fr> Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/skbuff.h> #include <linux/socket.h> #include <linux/ioctl.h> #include <linux/file.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/gfp.h> #include <net/sock.h> #include <asm/system.h> #include <asm/uaccess.h> #include "bnep.h" static int bnep_sock_release(struct socket *sock) { struct sock *sk = sock->sk; BT_DBG("sock %p sk %p", sock, sk); if (!sk) return 0; sock_orphan(sk); sock_put(sk); return 0; } static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct bnep_connlist_req cl; struct bnep_connadd_req ca; struct bnep_conndel_req cd; struct bnep_conninfo ci; struct socket *nsock; void __user *argp = (void __user *)arg; int err; BT_DBG("cmd %x arg %lx", cmd, arg); switch (cmd) { case BNEPCONNADD: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; nsock = sockfd_lookup(ca.sock, &err); if (!nsock) return err; if (nsock->sk->sk_state != BT_CONNECTED) { sockfd_put(nsock); return -EBADFD; } err = bnep_add_connection(&ca, nsock); if (!err) { if (copy_to_user(argp, &ca, sizeof(ca))) err = -EFAULT; } else sockfd_put(nsock); return err; case BNEPCONNDEL: if (!capable(CAP_NET_ADMIN)) return -EACCES; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; return bnep_del_connection(&cd); case BNEPGETCONNLIST: if (copy_from_user(&cl, argp, sizeof(cl))) return -EFAULT; if (cl.cnum <= 0) return -EINVAL; err = bnep_get_connlist(&cl); if (!err && copy_to_user(argp, &cl, sizeof(cl))) return -EFAULT; return err; case BNEPGETCONNINFO: if (copy_from_user(&ci, argp, sizeof(ci))) return -EFAULT; err = bnep_get_conninfo(&ci); if (!err && copy_to_user(argp, &ci, sizeof(ci))) return -EFAULT; return err; default: return -EINVAL; } return 0; } #ifdef CONFIG_COMPAT static int 
bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { if (cmd == BNEPGETCONNLIST) { struct bnep_connlist_req cl; uint32_t uci; int err; if (get_user(cl.cnum, (uint32_t __user *) arg) || get_user(uci, (u32 __user *) (arg + 4))) return -EFAULT; cl.ci = compat_ptr(uci); if (cl.cnum <= 0) return -EINVAL; err = bnep_get_connlist(&cl); if (!err && put_user(cl.cnum, (uint32_t __user *) arg)) err = -EFAULT; return err; } return bnep_sock_ioctl(sock, cmd, arg); } #endif static const struct proto_ops bnep_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = bnep_sock_release, .ioctl = bnep_sock_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = bnep_sock_compat_ioctl, #endif .bind = sock_no_bind, .getname = sock_no_getname, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .poll = sock_no_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .mmap = sock_no_mmap }; static struct proto bnep_proto = { .name = "BNEP", .owner = THIS_MODULE, .obj_size = sizeof(struct bt_sock) }; static int bnep_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &bnep_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = BT_OPEN; return 0; } static const struct net_proto_family bnep_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = bnep_sock_create }; int __init bnep_sock_init(void) { int err; err = proto_register(&bnep_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops); if (err < 0) goto error; return 0; error: 
BT_ERR("Can't register BNEP socket"); proto_unregister(&bnep_proto); return err; } void __exit bnep_sock_cleanup(void) { if (bt_sock_unregister(BTPROTO_BNEP) < 0) BT_ERR("Can't unregister BNEP socket"); proto_unregister(&bnep_proto); }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_3442_0
crossvul-cpp_data_bad_2891_10
/* Request a key from userspace
 *
 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/security/keys/request-key.rst
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/keyctl.h>
#include <linux/slab.h>
#include "internal.h"

#define key_negative_timeout	60	/* default timeout on a negative key's existence */

/**
 * complete_request_key - Complete the construction of a key.
 * @cons: The key construction record.
 * @error: The success or failute of the construction.
 *
 * Complete the attempt to construct a key.  The key will be negated
 * if an error is indicated.  The authorisation key will be revoked
 * unconditionally.
 */
void complete_request_key(struct key_construction *cons, int error)
{
	kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error);

	if (error < 0)
		key_negate_and_link(cons->key, key_negative_timeout, NULL,
				    cons->authkey);
	else
		key_revoke(cons->authkey);

	/* drop the references held by the construction record */
	key_put(cons->key);
	key_put(cons->authkey);
	kfree(cons);
}
EXPORT_SYMBOL(complete_request_key);

/*
 * Initialise a usermode helper that is going to have a specific session
 * keyring.
 *
 * This is called in context of freshly forked kthread before kernel_execve(),
 * so we can simply install the desired session_keyring at this point.
 */
static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
{
	struct key *keyring = info->data;

	return install_session_keyring_to_cred(cred, keyring);
}

/*
 * Clean up a usermode helper with session keyring.
 */
static void umh_keys_cleanup(struct subprocess_info *info)
{
	struct key *keyring = info->data;
	key_put(keyring);
}

/*
 * Call a usermode helper with a specific session keyring.
 * Takes its own reference on @session_keyring; released by
 * umh_keys_cleanup() when the helper finishes.
 */
static int call_usermodehelper_keys(const char *path, char **argv, char **envp,
					struct key *session_keyring, int wait)
{
	struct subprocess_info *info;

	info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
					  umh_keys_init, umh_keys_cleanup,
					  session_keyring);
	if (!info)
		return -ENOMEM;

	key_get(session_keyring);
	return call_usermodehelper_exec(info, wait);
}

/*
 * Request userspace finish the construction of a key
 * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
 */
static int call_sbin_request_key(struct key_construction *cons,
				 const char *op,
				 void *aux)
{
	static char const request_key[] = "/sbin/request-key";
	const struct cred *cred = current_cred();
	key_serial_t prkey, sskey;
	struct key *key = cons->key, *authkey = cons->authkey, *keyring,
		*session;
	char *argv[9], *envp[3], uid_str[12], gid_str[12];
	char key_str[12], keyring_str[3][12];
	char desc[20];
	int ret, i;

	kenter("{%d},{%d},%s", key->serial, authkey->serial, op);

	ret = install_user_keyrings();
	if (ret < 0)
		goto error_alloc;

	/* allocate a new session keyring */
	sprintf(desc, "_req.%u", key->serial);

	cred = get_current_cred();
	keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
				KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_QUOTA_OVERRUN, NULL, NULL);
	put_cred(cred);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto error_alloc;
	}

	/* attach the auth key to the session keyring */
	ret = key_link(keyring, authkey);
	if (ret < 0)
		goto error_link;

	/* record the UID and GID */
	sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
	sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));

	/* we say which key is under construction */
	sprintf(key_str, "%d", key->serial);

	/* we specify the process's default keyrings */
	sprintf(keyring_str[0], "%d",
		cred->thread_keyring ? cred->thread_keyring->serial : 0);

	prkey = 0;
	if (cred->process_keyring)
		prkey = cred->process_keyring->serial;
	sprintf(keyring_str[1], "%d", prkey);

	/* the session keyring pointer is RCU protected */
	rcu_read_lock();
	session = rcu_dereference(cred->session_keyring);
	if (!session)
		session = cred->user->session_keyring;
	sskey = session->serial;
	rcu_read_unlock();

	sprintf(keyring_str[2], "%d", sskey);

	/* set up a minimal environment */
	i = 0;
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[i] = NULL;

	/* set up the argument list */
	i = 0;
	argv[i++] = (char *)request_key;
	argv[i++] = (char *) op;
	argv[i++] = key_str;
	argv[i++] = uid_str;
	argv[i++] = gid_str;
	argv[i++] = keyring_str[0];
	argv[i++] = keyring_str[1];
	argv[i++] = keyring_str[2];
	argv[i] = NULL;

	/* do it */
	ret = call_usermodehelper_keys(request_key, argv, envp, keyring,
				       UMH_WAIT_PROC);
	kdebug("usermode -> 0x%x", ret);
	if (ret >= 0) {
		/* ret is the exit/wait code */
		if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) ||
		    key_validate(key) < 0)
			ret = -ENOKEY;
		else
			/* ignore any errors from userspace if the key was
			 * instantiated */
			ret = 0;
	}

error_link:
	key_put(keyring);

error_alloc:
	/* complete_request_key() consumes the construction record */
	complete_request_key(cons, ret);
	kleave(" = %d", ret);
	return ret;
}

/*
 * Call out to userspace for key construction.
 *
 * Program failure is ignored in favour of key status.
 */
static int construct_key(struct key *key, const void *callout_info,
			 size_t callout_len, void *aux,
			 struct key *dest_keyring)
{
	struct key_construction *cons;
	request_key_actor_t actor;
	struct key *authkey;
	int ret;

	kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);

	/* construction record; freed by complete_request_key() */
	cons = kmalloc(sizeof(*cons), GFP_KERNEL);
	if (!cons)
		return -ENOMEM;

	/* allocate an authorisation key */
	authkey = request_key_auth_new(key, callout_info, callout_len,
				       dest_keyring);
	if (IS_ERR(authkey)) {
		kfree(cons);
		ret = PTR_ERR(authkey);
		authkey = NULL;
	} else {
		cons->authkey = key_get(authkey);
		cons->key = key_get(key);

		/* make the call; key types may supply their own actor,
		 * otherwise fall back to the /sbin/request-key helper */
		actor = call_sbin_request_key;
		if (key->type->request_key)
			actor = key->type->request_key;

		ret = actor(cons, "create", aux);

		/* check that the actor called complete_request_key() prior to
		 * returning an error */
		WARN_ON(ret < 0 &&
			!test_bit(KEY_FLAG_REVOKED, &authkey->flags));
		key_put(authkey);
	}

	kleave(" = %d", ret);
	return ret;
}

/*
 * Get the appropriate destination keyring for the request.
 *
 * The keyring selected is returned with an extra reference upon it which the
 * caller must release.
 *
 * NOTE(review): this variant performs no permission check that the current
 * task may actually write to the default keyring it selects — TODO confirm
 * against the upstream hardening (CVE-2017-17807 added a KEY_NEED_WRITE
 * check here, changing the function to return an error code).
 */
static void construct_get_dest_keyring(struct key **_dest_keyring)
{
	struct request_key_auth *rka;
	const struct cred *cred = current_cred();
	struct key *dest_keyring = *_dest_keyring, *authkey;

	kenter("%p", dest_keyring);

	/* find the appropriate keyring */
	if (dest_keyring) {
		/* the caller supplied one */
		key_get(dest_keyring);
	} else {
		/* use a default keyring; falling through the cases until we
		 * find one that we actually have */
		switch (cred->jit_keyring) {
		case KEY_REQKEY_DEFL_DEFAULT:
		case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
			if (cred->request_key_auth) {
				authkey = cred->request_key_auth;
				down_read(&authkey->sem);
				rka = authkey->payload.data[0];
				if (!test_bit(KEY_FLAG_REVOKED,
					      &authkey->flags))
					dest_keyring =
						key_get(rka->dest_keyring);
				up_read(&authkey->sem);
				if (dest_keyring)
					break;
			}
			/* fall through */
		case KEY_REQKEY_DEFL_THREAD_KEYRING:
			dest_keyring = key_get(cred->thread_keyring);
			if (dest_keyring)
				break;
			/* fall through */
		case KEY_REQKEY_DEFL_PROCESS_KEYRING:
			dest_keyring = key_get(cred->process_keyring);
			if (dest_keyring)
				break;
			/* fall through */
		case KEY_REQKEY_DEFL_SESSION_KEYRING:
			rcu_read_lock();
			dest_keyring = key_get(
				rcu_dereference(cred->session_keyring));
			rcu_read_unlock();

			if (dest_keyring)
				break;
			/* fall through */
		case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
			dest_keyring =
				key_get(cred->user->session_keyring);
			break;
		case KEY_REQKEY_DEFL_USER_KEYRING:
			dest_keyring = key_get(cred->user->uid_keyring);
			break;
		case KEY_REQKEY_DEFL_GROUP_KEYRING:
		default:
			BUG();
		}
	}

	*_dest_keyring = dest_keyring;
	kleave(" [dk %d]", key_serial(dest_keyring));
	return;
}

/*
 * Allocate a new key in under-construction state and attempt to link it in to
 * the requested keyring.
 *
 * May return a key that's already under construction instead if there was a
 * race between two thread calling request_key().
 */
static int construct_alloc_key(struct keyring_search_context *ctx,
			       struct key *dest_keyring,
			       unsigned long flags,
			       struct key_user *user,
			       struct key **_key)
{
	struct assoc_array_edit *edit;
	struct key *key;
	key_perm_t perm;
	key_ref_t key_ref;
	int ret;

	kenter("%s,%s,,,",
	       ctx->index_key.type->name, ctx->index_key.description);

	*_key = NULL;
	mutex_lock(&user->cons_lock);

	/* default permissions for the new under-construction key */
	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
	perm |= KEY_USR_VIEW;
	if (ctx->index_key.type->read)
		perm |= KEY_POS_READ;
	if (ctx->index_key.type == &key_type_keyring ||
	    ctx->index_key.type->update)
		perm |= KEY_POS_WRITE;

	key = key_alloc(ctx->index_key.type, ctx->index_key.description,
			ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
			perm, flags, NULL);
	if (IS_ERR(key))
		goto alloc_failed;

	set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);

	if (dest_keyring) {
		ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
		if (ret < 0)
			goto link_prealloc_failed;
	}

	/* attach the key to the destination keyring under lock, but we do need
	 * to do another check just in case someone beat us to it whilst we
	 * waited for locks */
	mutex_lock(&key_construction_mutex);

	key_ref = search_process_keyrings(ctx);
	if (!IS_ERR(key_ref))
		goto key_already_present;

	if (dest_keyring)
		__key_link(key, &edit);

	mutex_unlock(&key_construction_mutex);
	if (dest_keyring)
		__key_link_end(dest_keyring, &ctx->index_key, edit);
	mutex_unlock(&user->cons_lock);
	*_key = key;
	kleave(" = 0 [%d]", key_serial(key));
	return 0;

	/* the key is now present - we tell the caller that we found it by
	 * returning -EINPROGRESS  */
key_already_present:
	key_put(key);
	mutex_unlock(&key_construction_mutex);
	key = key_ref_to_ptr(key_ref);
	if (dest_keyring) {
		ret = __key_link_check_live_key(dest_keyring, key);
		if (ret == 0)
			__key_link(key, &edit);
		__key_link_end(dest_keyring, &ctx->index_key, edit);
		if (ret < 0)
			goto link_check_failed;
	}
	mutex_unlock(&user->cons_lock);
	*_key = key;
	kleave(" = -EINPROGRESS [%d]", key_serial(key));
	return -EINPROGRESS;

link_check_failed:
	mutex_unlock(&user->cons_lock);
	key_put(key);
	kleave(" = %d [linkcheck]", ret);
	return ret;

link_prealloc_failed:
	mutex_unlock(&user->cons_lock);
	key_put(key);
	kleave(" = %d [prelink]", ret);
	return ret;

alloc_failed:
	mutex_unlock(&user->cons_lock);
	kleave(" = %ld", PTR_ERR(key));
	return PTR_ERR(key);
}

/*
 * Commence key construction.
 */
static struct key *construct_key_and_link(struct keyring_search_context *ctx,
					  const char *callout_info,
					  size_t callout_len,
					  void *aux,
					  struct key *dest_keyring,
					  unsigned long flags)
{
	struct key_user *user;
	struct key *key;
	int ret;

	kenter("");

	/* keyrings themselves may not be constructed this way */
	if (ctx->index_key.type == &key_type_keyring)
		return ERR_PTR(-EPERM);

	user = key_user_lookup(current_fsuid());
	if (!user)
		return ERR_PTR(-ENOMEM);

	construct_get_dest_keyring(&dest_keyring);

	ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
	key_user_put(user);

	if (ret == 0) {
		ret = construct_key(key, callout_info, callout_len, aux,
				    dest_keyring);
		if (ret < 0) {
			kdebug("cons failed");
			goto construction_failed;
		}
	} else if (ret == -EINPROGRESS) {
		/* someone else is already constructing this key */
		ret = 0;
	} else {
		goto couldnt_alloc_key;
	}

	key_put(dest_keyring);
	kleave(" = key %d", key_serial(key));
	return key;

construction_failed:
	key_negate_and_link(key, key_negative_timeout, NULL, NULL);
	key_put(key);
couldnt_alloc_key:
	key_put(dest_keyring);
	kleave(" = %d", ret);
	return ERR_PTR(ret);
}

/**
 * request_key_and_link - Request a key and cache it in a keyring.
 * @type: The type of key we want.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 * @callout_len: The length of callout_info.
 * @aux: Auxiliary data for the upcall.
 * @dest_keyring: Where to cache the key.
 * @flags: Flags to key_alloc().
 *
 * A key matching the specified criteria is searched for in the process's
 * keyrings and returned with its usage count incremented if found.
Otherwise,
 * if callout_info is not NULL, a key will be allocated and some service
 * (probably in userspace) will be asked to instantiate it.
 *
 * If successfully found or created, the key will be linked to the destination
 * keyring if one is provided.
 *
 * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
 * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
 * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
 * if insufficient key quota was available to create a new key; or -ENOMEM if
 * insufficient memory was available.
 *
 * If the returned key was created, then it may still be under construction,
 * and wait_for_key_construction() should be used to wait for that to complete.
 */
struct key *request_key_and_link(struct key_type *type,
				 const char *description,
				 const void *callout_info,
				 size_t callout_len,
				 void *aux,
				 struct key *dest_keyring,
				 unsigned long flags)
{
	struct keyring_search_context ctx = {
		.index_key.type		= type,
		.index_key.description	= description,
		.cred			= current_cred(),
		.match_data.cmp		= key_default_cmp,
		.match_data.raw_data	= description,
		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
		.flags			= (KEYRING_SEARCH_DO_STATE_CHECK |
					   KEYRING_SEARCH_SKIP_EXPIRED),
	};
	struct key *key;
	key_ref_t key_ref;
	int ret;

	kenter("%s,%s,%p,%zu,%p,%p,%lx",
	       ctx.index_key.type->name, ctx.index_key.description,
	       callout_info, callout_len, aux, dest_keyring, flags);

	/* let the key type preprocess the match criteria if it wants to */
	if (type->match_preparse) {
		ret = type->match_preparse(&ctx.match_data);
		if (ret < 0) {
			key = ERR_PTR(ret);
			goto error;
		}
	}

	/* search all the process keyrings for a key */
	key_ref = search_process_keyrings(&ctx);

	if (!IS_ERR(key_ref)) {
		/* already have it: just cache it in the destination keyring
		 * if one was requested */
		key = key_ref_to_ptr(key_ref);
		if (dest_keyring) {
			construct_get_dest_keyring(&dest_keyring);
			ret = key_link(dest_keyring, key);
			key_put(dest_keyring);
			if (ret < 0) {
				key_put(key);
				key = ERR_PTR(ret);
				goto error_free;
			}
		}
	} else if (PTR_ERR(key_ref) != -EAGAIN) {
		/* hard failure (-EACCES etc.): propagate it */
		key = ERR_CAST(key_ref);
	} else {
		/* the search failed, but the keyrings were searchable, so we
		 * should consult userspace if we can */
		key = ERR_PTR(-ENOKEY);
		if (!callout_info)
			goto error_free;

		key = construct_key_and_link(&ctx, callout_info, callout_len,
					     aux, dest_keyring, flags);
	}

error_free:
	if (type->match_free)
		type->match_free(&ctx.match_data);
error:
	kleave(" = %p", key);
	return key;
}

/**
 * wait_for_key_construction - Wait for construction of a key to complete
 * @key: The key being waited for.
 * @intr: Whether to wait interruptibly.
 *
 * Wait for a key to finish being constructed.
 *
 * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
 * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
 * revoked or expired.
 */
int wait_for_key_construction(struct key *key, bool intr)
{
	int ret;

	/* sleep until the instantiator clears KEY_FLAG_USER_CONSTRUCT */
	ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
			  intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
	if (ret)
		return -ERESTARTSYS;
	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
		/* order the read of reject_error after the flag test */
		smp_rmb();
		return key->reject_error;
	}
	return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);

/**
 * request_key - Request a key and wait for construction
 * @type: Type of key.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 *
 * As for request_key_and_link() except that it does not add the returned key
 * to a keyring if found, new keys are always allocated in the user's quota,
 * the callout_info must be a NUL-terminated string and no auxiliary data can
 * be passed.
 *
 * Furthermore, it then works as wait_for_key_construction() to wait for the
 * completion of keys undergoing construction with a non-interruptible wait.
*/ struct key *request_key(struct key_type *type, const char *description, const char *callout_info) { struct key *key; size_t callout_len = 0; int ret; if (callout_info) callout_len = strlen(callout_info); key = request_key_and_link(type, description, callout_info, callout_len, NULL, NULL, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key)) { ret = wait_for_key_construction(key, false); if (ret < 0) { key_put(key); return ERR_PTR(ret); } } return key; } EXPORT_SYMBOL(request_key); /** * request_key_with_auxdata - Request a key with auxiliary data for the upcaller * @type: The type of key we want. * @description: The searchable description of the key. * @callout_info: The data to pass to the instantiation upcall (or NULL). * @callout_len: The length of callout_info. * @aux: Auxiliary data for the upcall. * * As for request_key_and_link() except that it does not add the returned key * to a keyring if found and new keys are always allocated in the user's quota. * * Furthermore, it then works as wait_for_key_construction() to wait for the * completion of keys undergoing construction with a non-interruptible wait. */ struct key *request_key_with_auxdata(struct key_type *type, const char *description, const void *callout_info, size_t callout_len, void *aux) { struct key *key; int ret; key = request_key_and_link(type, description, callout_info, callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key)) { ret = wait_for_key_construction(key, false); if (ret < 0) { key_put(key); return ERR_PTR(ret); } } return key; } EXPORT_SYMBOL(request_key_with_auxdata); /* * request_key_async - Request a key (allow async construction) * @type: Type of key. * @description: The searchable description of the key. * @callout_info: The data to pass to the instantiation upcall (or NULL). * @callout_len: The length of callout_info. 
* * As for request_key_and_link() except that it does not add the returned key * to a keyring if found, new keys are always allocated in the user's quota and * no auxiliary data can be passed. * * The caller should call wait_for_key_construction() to wait for the * completion of the returned key if it is still undergoing construction. */ struct key *request_key_async(struct key_type *type, const char *description, const void *callout_info, size_t callout_len) { return request_key_and_link(type, description, callout_info, callout_len, NULL, NULL, KEY_ALLOC_IN_QUOTA); } EXPORT_SYMBOL(request_key_async); /* * request a key with auxiliary data for the upcaller (allow async construction) * @type: Type of key. * @description: The searchable description of the key. * @callout_info: The data to pass to the instantiation upcall (or NULL). * @callout_len: The length of callout_info. * @aux: Auxiliary data for the upcall. * * As for request_key_and_link() except that it does not add the returned key * to a keyring if found and new keys are always allocated in the user's quota. * * The caller should call wait_for_key_construction() to wait for the * completion of the returned key if it is still undergoing construction. */ struct key *request_key_async_with_auxdata(struct key_type *type, const char *description, const void *callout_info, size_t callout_len, void *aux) { return request_key_and_link(type, description, callout_info, callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA); } EXPORT_SYMBOL(request_key_async_with_auxdata);
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2891_10
crossvul-cpp_data_good_2391_0
/*
 * linux/fs/isofs/rock.c
 *
 * (C) 1992, 1993  Eric Youngdale
 *
 * Rock Ridge Extensions to iso9660
 */

#include <linux/slab.h>
#include <linux/pagemap.h>

#include "isofs.h"
#include "rock.h"

/*
 * These functions are designed to read the system areas of a directory record
 * and extract relevant information.  There are different functions provided
 * depending upon what information we need at the time.  One function fills
 * out an inode structure, a second one extracts a filename, a third one
 * returns a symbolic link name, and a fourth one returns the extent number
 * for the file.
 */

/* Compose a two-character SUSP signature into the little-endian u16 form
 * produced by isonum_721() */
#define SIG(A,B) ((A) | ((B) << 8))	/* isonum_721() */

/* Cursor state used while walking a chain of Rock Ridge system-use fields,
 * possibly spanning continuation (CE) extents. */
struct rock_state {
	void *buffer;		/* kmalloc'd copy of current CE extent data */
	unsigned char *chr;	/* current read position */
	int len;		/* bytes remaining from chr */
	int cont_size;		/* size of pending continuation area */
	int cont_extent;	/* block of pending continuation area */
	int cont_offset;	/* offset within that block */
	int cont_loops;		/* CE hops taken so far (loop guard) */
	struct inode *inode;
};

/*
 * This is a way of ensuring that we have something in the system
 * use fields that is compatible with Rock Ridge.  Return zero on success.
 */
static int check_sp(struct rock_ridge *rr, struct inode *inode)
{
	/* SP entries must carry the 0xbe 0xef magic */
	if (rr->u.SP.magic[0] != 0xbe)
		return -1;
	if (rr->u.SP.magic[1] != 0xef)
		return -1;
	ISOFS_SB(inode->i_sb)->s_rock_offset = rr->u.SP.skip;
	return 0;
}

/* Point rs at the system-use area that follows the directory record's name,
 * honouring the per-filesystem SP skip offset if one was recorded. */
static void setup_rock_ridge(struct iso_directory_record *de,
			     struct inode *inode, struct rock_state *rs)
{
	rs->len = sizeof(struct iso_directory_record) + de->name_len[0];
	if (rs->len & 1)
		(rs->len)++;	/* records are padded to even length */
	rs->chr = (unsigned char *)de + rs->len;
	rs->len = *((unsigned char *)de) - rs->len;
	if (rs->len < 0)
		rs->len = 0;

	if (ISOFS_SB(inode->i_sb)->s_rock_offset != -1) {
		rs->len -= ISOFS_SB(inode->i_sb)->s_rock_offset;
		rs->chr += ISOFS_SB(inode->i_sb)->s_rock_offset;
		if (rs->len < 0)
			rs->len = 0;
	}
}

/* Reset the walk state; rs->buffer starts NULL so rock_continue()'s kfree
 * is safe on the first pass. */
static void init_rock_state(struct rock_state *rs, struct inode *inode)
{
	memset(rs, 0, sizeof(*rs));
	rs->inode = inode;
}

/* Maximum number of Rock Ridge continuation entries */
#define RR_MAX_CE_ENTRIES 32

/*
 * Returns 0 if the caller should continue scanning, 1 if the scan must end
 * and -ve on error.
 */
static int rock_continue(struct rock_state *rs)
{
	int ret = 1;
	int blocksize = 1 << rs->inode->i_blkbits;
	const int min_de_size = offsetof(struct rock_ridge, u);

	/* previous CE buffer (if any) is finished with */
	kfree(rs->buffer);
	rs->buffer = NULL;

	/* reject continuation areas that do not fit entirely inside one
	 * block; the values come straight off the disc and are untrusted */
	if ((unsigned)rs->cont_offset > blocksize - min_de_size ||
	    (unsigned)rs->cont_size > blocksize ||
	    (unsigned)(rs->cont_offset + rs->cont_size) > blocksize) {
		printk(KERN_NOTICE "rock: corrupted directory entry. "
			"extent=%d, offset=%d, size=%d\n",
			rs->cont_extent, rs->cont_offset, rs->cont_size);
		ret = -EIO;
		goto out;
	}

	if (rs->cont_extent) {
		struct buffer_head *bh;

		rs->buffer = kmalloc(rs->cont_size, GFP_KERNEL);
		if (!rs->buffer) {
			ret = -ENOMEM;
			goto out;
		}
		ret = -EIO;
		/* bound the number of CE hops so a looping chain on a
		 * crafted image cannot spin forever */
		if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
			goto out;
		bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
		if (bh) {
			memcpy(rs->buffer, bh->b_data + rs->cont_offset,
			       rs->cont_size);
			put_bh(bh);
			rs->chr = rs->buffer;
			rs->len = rs->cont_size;
			rs->cont_extent = 0;
			rs->cont_size = 0;
			rs->cont_offset = 0;
			return 0;
		}
		printk("Unable to read rock-ridge attributes\n");
	}
out:
	kfree(rs->buffer);
	rs->buffer = NULL;
	return ret;
}

/*
 * We think there's a record of type `sig' at rs->chr.  Parse the signature
 * and make sure that there's really room for a record of that type.
 */
static int rock_check_overflow(struct rock_state *rs, int sig)
{
	int len;

	/* minimum on-disc size of the union payload for each known tag;
	 * unknown tags get 0 so only the common header is required */
	switch (sig) {
	case SIG('S', 'P'):
		len = sizeof(struct SU_SP_s);
		break;
	case SIG('C', 'E'):
		len = sizeof(struct SU_CE_s);
		break;
	case SIG('E', 'R'):
		len = sizeof(struct SU_ER_s);
		break;
	case SIG('R', 'R'):
		len = sizeof(struct RR_RR_s);
		break;
	case SIG('P', 'X'):
		len = sizeof(struct RR_PX_s);
		break;
	case SIG('P', 'N'):
		len = sizeof(struct RR_PN_s);
		break;
	case SIG('S', 'L'):
		len = sizeof(struct RR_SL_s);
		break;
	case SIG('N', 'M'):
		len = sizeof(struct RR_NM_s);
		break;
	case SIG('C', 'L'):
		len = sizeof(struct RR_CL_s);
		break;
	case SIG('P', 'L'):
		len = sizeof(struct RR_PL_s);
		break;
	case SIG('T', 'F'):
		len = sizeof(struct RR_TF_s);
		break;
	case SIG('Z', 'F'):
		len = sizeof(struct RR_ZF_s);
		break;
	default:
		len = 0;
		break;
	}

	len += offsetof(struct rock_ridge, u);
	if (len > rs->len) {
		printk(KERN_NOTICE "rock: directory entry would overflow "
				"storage\n");
		printk(KERN_NOTICE "rock: sig=0x%02x, size=%d, remaining=%d\n",
				sig, len, rs->len);
		return -EIO;
	}
	return 0;
}

/*
 * return length of name field; 0: not found, -1: to be ignored
 */
int get_rock_ridge_filename(struct iso_directory_record *de,
			    char *retname, struct inode *inode)
{
	struct rock_state rs;
	struct rock_ridge *rr;
	int sig;
	int retnamlen = 0;
	int truncate = 0;
	int ret = 0;

	if (!ISOFS_SB(inode->i_sb)->s_rock)
		return 0;
	*retname = 0;

	init_rock_state(&rs, inode);
	setup_rock_ridge(de, inode, &rs);
repeat:

	while (rs.len > 2) { /* There may be one byte for padding somewhere */
		rr = (struct rock_ridge *)rs.chr;
		/*
		 * Ignore rock ridge info if rr->len is out of range, but
		 * don't return -EIO because that would make the file
		 * invisible.
		 */
		if (rr->len < 3)
			goto out;	/* Something got screwed up here */
		sig = isonum_721(rs.chr);
		if (rock_check_overflow(&rs, sig))
			goto eio;
		rs.chr += rr->len;
		rs.len -= rr->len;
		/*
		 * As above, just ignore the rock ridge info if rr->len
		 * is bogus.
		 */
		if (rs.len < 0)
			goto out;	/* Something got screwed up here */

		switch (sig) {
		case SIG('R', 'R'):
			/* no NM field present: nothing more to find */
			if ((rr->u.RR.flags[0] & RR_NM) == 0)
				goto out;
			break;
		case SIG('S', 'P'):
			if (check_sp(rr, inode))
				goto out;
			break;
		case SIG('C', 'E'):
			/* remember the continuation area for rock_continue */
			rs.cont_extent = isonum_733(rr->u.CE.extent);
			rs.cont_offset = isonum_733(rr->u.CE.offset);
			rs.cont_size = isonum_733(rr->u.CE.size);
			break;
		case SIG('N', 'M'):
			if (truncate)
				break;
			/* an NM record shorter than its fixed header
			 * carries no name bytes */
			if (rr->len < 5)
				break;
			/*
			 * If the flags are 2 or 4, this indicates '.' or '..'.
			 * We don't want to do anything with this, because it
			 * screws up the code that calls us.  We don't really
			 * care anyways, since we can just use the non-RR
			 * name.
			 */
			if (rr->u.NM.flags & 6)
				break;

			if (rr->u.NM.flags & ~1) {
				printk("Unsupported NM flag settings (%d)\n",
					rr->u.NM.flags);
				break;
			}
			if ((strlen(retname) + rr->len - 5) >= 254) {
				/* accumulated name would overrun retname;
				 * stop taking further NM fragments */
				truncate = 1;
				break;
			}
			strncat(retname, rr->u.NM.name, rr->len - 5);
			retnamlen += rr->len - 5;
			break;
		case SIG('R', 'E'):
			/* relocated entry: tell the caller to hide it */
			kfree(rs.buffer);
			return -1;
		default:
			break;
		}
	}
	ret = rock_continue(&rs);
	if (ret == 0)
		goto repeat;
	if (ret == 1)
		return retnamlen; /* If 0, this file did not have a NM field */
out:
	kfree(rs.buffer);
	return ret;
eio:
	ret = -EIO;
	goto out;
}

#define RR_REGARD_XA 1	/* also look behind XA attributes */
#define RR_RELOC_DE 2	/* this record was reached via a CL relocation */

/*
 * Fill in inode fields (mode, ownership, times, device numbers, symlink
 * length, relocation and compression info) from the Rock Ridge system-use
 * fields of a directory record.
 */
static int parse_rock_ridge_inode_internal(struct iso_directory_record *de,
					   struct inode *inode, int flags)
{
	int symlink_len = 0;
	int cnt, sig;
	unsigned int reloc_block;
	struct inode *reloc;
	struct rock_ridge *rr;
	int rootflag;
	struct rock_state rs;
	int ret = 0;

	if (!ISOFS_SB(inode->i_sb)->s_rock)
		return 0;

	init_rock_state(&rs, inode);
	setup_rock_ridge(de, inode, &rs);
	if (flags & RR_REGARD_XA) {
		/* skip the 14-byte XA attribute header */
		rs.chr += 14;
		rs.len -= 14;
		if (rs.len < 0)
			rs.len = 0;
	}

repeat:
	while (rs.len > 2) { /* There may be one byte for padding somewhere */
		rr = (struct rock_ridge *)rs.chr;
		/*
		 * Ignore rock ridge info if rr->len is out of range, but
		 * don't return -EIO because that would make the file
		 * invisible.
		 */
		if (rr->len < 3)
			goto out;	/* Something got screwed up here */
		sig = isonum_721(rs.chr);
		if (rock_check_overflow(&rs, sig))
			goto eio;
		rs.chr += rr->len;
		rs.len -= rr->len;
		/*
		 * As above, just ignore the rock ridge info if rr->len
		 * is bogus.
		 */
		if (rs.len < 0)
			goto out;	/* Something got screwed up here */

		switch (sig) {
#ifndef CONFIG_ZISOFS		/* No flag for SF or ZF */
		case SIG('R', 'R'):
			if ((rr->u.RR.flags[0] &
			     (RR_PX | RR_TF | RR_SL | RR_CL)) == 0)
				goto out;
			break;
#endif
		case SIG('S', 'P'):
			if (check_sp(rr, inode))
				goto out;
			break;
		case SIG('C', 'E'):
			rs.cont_extent = isonum_733(rr->u.CE.extent);
			rs.cont_offset = isonum_733(rr->u.CE.offset);
			rs.cont_size = isonum_733(rr->u.CE.size);
			break;
		case SIG('E', 'R'):
			/* Invalid length of ER tag id? */
			if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
				goto out;
			ISOFS_SB(inode->i_sb)->s_rock = 1;
			printk(KERN_DEBUG "ISO 9660 Extensions: ");
			{
				int p;
				for (p = 0; p < rr->u.ER.len_id; p++)
					printk("%c", rr->u.ER.data[p]);
			}
			printk("\n");
			break;
		case SIG('P', 'X'):
			/* POSIX attributes */
			inode->i_mode = isonum_733(rr->u.PX.mode);
			set_nlink(inode, isonum_733(rr->u.PX.n_links));
			i_uid_write(inode, isonum_733(rr->u.PX.uid));
			i_gid_write(inode, isonum_733(rr->u.PX.gid));
			break;
		case SIG('P', 'N'):
			/* POSIX device number */
			{
				int high, low;
				high = isonum_733(rr->u.PN.dev_high);
				low = isonum_733(rr->u.PN.dev_low);
				/*
				 * The Rock Ridge standard specifies that if
				 * sizeof(dev_t) <= 4, then the high field is
				 * unused, and the device number is completely
				 * stored in the low field.  Some writers may
				 * ignore this subtlety,
				 * and as a result we test to see if the entire
				 * device number is
				 * stored in the low field, and use that.
				 */
				if ((low & ~0xff) && high == 0) {
					inode->i_rdev =
					    MKDEV(low >> 8, low & 0xff);
				} else {
					inode->i_rdev =
					    MKDEV(high, low);
				}
			}
			break;
		case SIG('T', 'F'):
			/*
			 * Some RRIP writers incorrectly place ctime in the
			 * TF_CREATE field.  Try to handle this correctly for
			 * either case.
			 */
			/* Rock ridge never appears on a High Sierra disk */
			cnt = 0;
			if (rr->u.TF.flags & TF_CREATE) {
				inode->i_ctime.tv_sec =
				    iso_date(rr->u.TF.times[cnt++].time, 0);
				inode->i_ctime.tv_nsec = 0;
			}
			if (rr->u.TF.flags & TF_MODIFY) {
				inode->i_mtime.tv_sec =
				    iso_date(rr->u.TF.times[cnt++].time, 0);
				inode->i_mtime.tv_nsec = 0;
			}
			if (rr->u.TF.flags & TF_ACCESS) {
				inode->i_atime.tv_sec =
				    iso_date(rr->u.TF.times[cnt++].time, 0);
				inode->i_atime.tv_nsec = 0;
			}
			if (rr->u.TF.flags & TF_ATTRIBUTES) {
				inode->i_ctime.tv_sec =
				    iso_date(rr->u.TF.times[cnt++].time, 0);
				inode->i_ctime.tv_nsec = 0;
			}
			break;
		case SIG('S', 'L'):
			/* symlink: only compute the total target length here;
			 * the text itself is read by the readpage op */
			{
				int slen;
				struct SL_component *slp;
				struct SL_component *oldslp;
				slen = rr->len - 5;
				slp = &rr->u.SL.link;
				inode->i_size = symlink_len;
				while (slen > 1) {
					rootflag = 0;
					switch (slp->flags & ~1) {
					case 0:
						inode->i_size +=
						    slp->len;
						break;
					case 2:	/* "." component */
						inode->i_size += 1;
						break;
					case 4:	/* ".." component */
						inode->i_size += 2;
						break;
					case 8:	/* root: leading '/' */
						rootflag = 1;
						inode->i_size += 1;
						break;
					default:
						printk("Symlink component flag "
							"not implemented\n");
					}
					slen -= slp->len + 2;
					oldslp = slp;
					slp = (struct SL_component *)
						(((char *)slp) + slp->len + 2);

					if (slen < 2) {
						if (((rr->u.SL.
						      flags & 1) != 0)
						    &&
						    ((oldslp->
						      flags & 1) == 0))
							inode->i_size +=
							    1;
						break;
					}

					/*
					 * If this component record isn't
					 * continued, then append a '/'.
					 */
					if (!rootflag
					    && (oldslp->flags & 1) == 0)
						inode->i_size += 1;
				}
			}
			symlink_len = inode->i_size;
			break;
		case SIG('R', 'E'):
			printk(KERN_WARNING "Attempt to read inode for "
					"relocated directory\n");
			goto out;
		case SIG('C', 'L'):
			/* child link: this entry is actually a relocated
			 * directory living elsewhere on the disc */
			if (flags & RR_RELOC_DE) {
				printk(KERN_ERR
				       "ISOFS: Recursive directory relocation "
				       "is not supported\n");
				goto eio;
			}
			reloc_block = isonum_733(rr->u.CL.location);
			if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
			    ISOFS_I(inode)->i_iget5_offset == 0) {
				printk(KERN_ERR
				       "ISOFS: Directory relocation points to "
				       "itself\n");
				goto eio;
			}
			ISOFS_I(inode)->i_first_extent = reloc_block;
			reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out;
			}
			/* adopt the attributes of the relocated inode */
			inode->i_mode = reloc->i_mode;
			set_nlink(inode, reloc->i_nlink);
			inode->i_uid = reloc->i_uid;
			inode->i_gid = reloc->i_gid;
			inode->i_rdev = reloc->i_rdev;
			inode->i_size = reloc->i_size;
			inode->i_blocks = reloc->i_blocks;
			inode->i_atime = reloc->i_atime;
			inode->i_ctime = reloc->i_ctime;
			inode->i_mtime = reloc->i_mtime;
			iput(reloc);
			break;
#ifdef CONFIG_ZISOFS
		case SIG('Z', 'F'): {
			int algo;

			if (ISOFS_SB(inode->i_sb)->s_nocompress)
				break;
			algo = isonum_721(rr->u.ZF.algorithm);
			if (algo == SIG('p', 'z')) {
				int block_shift =
					isonum_711(&rr->u.ZF.parms[1]);
				if (block_shift > 17) {
					printk(KERN_WARNING "isofs: "
						"Can't handle ZF block "
						"size of 2^%d\n",
						block_shift);
				} else {
					/*
					 * Note: we don't change
					 * i_blocks here
					 */
					ISOFS_I(inode)->i_file_format =
						isofs_file_compressed;
					/*
					 * Parameters to compression
					 * algorithm (header size,
					 * block size)
					 */
					ISOFS_I(inode)->i_format_parm[0] =
						isonum_711(&rr->u.ZF.parms[0]);
					ISOFS_I(inode)->i_format_parm[1] =
						isonum_711(&rr->u.ZF.parms[1]);
					inode->i_size =
						isonum_733(rr->u.ZF.real_size);
				}
			} else {
				printk(KERN_WARNING
				       "isofs: Unknown ZF compression "
				       "algorithm: %c%c\n",
				       rr->u.ZF.algorithm[0],
				       rr->u.ZF.algorithm[1]);
			}
			break;
		}
#endif
		default:
			break;
		}
	}
	ret = rock_continue(&rs);
	if (ret == 0)
		goto repeat;
	if (ret == 1)
		ret = 0;
out:
	kfree(rs.buffer);
	return ret;
eio:
	ret = -EIO;
	goto out;
}

/*
 * Append the text of one SL record's components to rpnt, inserting '.',
 * '..', '/' as the component flags dictate.  Returns the advanced pointer,
 * or NULL if the output would pass plimit.
 */
static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
{
	int slen;
	int rootflag;
	struct SL_component *oldslp;
	struct SL_component *slp;
	slen = rr->len - 5;
	slp = &rr->u.SL.link;
	while (slen > 1) {
		rootflag = 0;
		switch (slp->flags & ~1) {
		case 0:
			if (slp->len > plimit - rpnt)
				return NULL;
			memcpy(rpnt, slp->text, slp->len);
			rpnt += slp->len;
			break;
		case 2:	/* "." component */
			if (rpnt >= plimit)
				return NULL;
			*rpnt++ = '.';
			break;
		case 4:	/* ".." component */
			if (2 > plimit - rpnt)
				return NULL;
			*rpnt++ = '.';
			*rpnt++ = '.';
			break;
		case 8:	/* root */
			if (rpnt >= plimit)
				return NULL;
			rootflag = 1;
			*rpnt++ = '/';
			break;
		default:
			printk("Symlink component flag not implemented (%d)\n",
			       slp->flags);
		}
		slen -= slp->len + 2;
		oldslp = slp;
		slp = (struct SL_component *)((char *)slp + slp->len + 2);

		if (slen < 2) {
			/*
			 * If there is another SL record, and this component
			 * record isn't continued, then add a slash.
			 */
			if ((!rootflag) && (rr->u.SL.flags & 1) &&
			    !(oldslp->flags & 1)) {
				if (rpnt >= plimit)
					return NULL;
				*rpnt++ = '/';
			}
			break;
		}

		/*
		 * If this component record isn't continued, then append a '/'.
		 */
		if (!rootflag && !(oldslp->flags & 1)) {
			if (rpnt >= plimit)
				return NULL;
			*rpnt++ = '/';
		}
	}
	return rpnt;
}

/* Public entry point: parse Rock Ridge attributes for an inode, retrying
 * behind XA attributes when the first pass found none. */
int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
			   int relocated)
{
	int flags = relocated ? RR_RELOC_DE : 0;
	int result = parse_rock_ridge_inode_internal(de, inode, flags);

	/*
	 * if rockridge flag was reset and we didn't look for attributes
	 * behind eventual XA attributes, have a look there
	 */
	if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
	    && (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
		result = parse_rock_ridge_inode_internal(de, inode,
							 flags | RR_REGARD_XA);
	}
	return result;
}

/*
 * readpage() for symlinks: reads symlink contents into the page and either
 * makes it uptodate and returns 0 or returns error (-EIO)
 */
static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct iso_inode_info *ei = ISOFS_I(inode);
	struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
	char *link = kmap(page);
	unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
	struct buffer_head *bh;
	char *rpnt = link;
	unsigned char *pnt;
	struct iso_directory_record *raw_de;
	unsigned long block, offset;
	int sig;
	struct rock_ridge *rr;
	struct rock_state rs;
	int ret;

	if (!sbi->s_rock)
		goto error;

	init_rock_state(&rs, inode);
	/* re-read the raw directory record for this inode */
	block = ei->i_iget5_block;
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		goto out_noread;

	offset = ei->i_iget5_offset;
	pnt = (unsigned char *)bh->b_data + offset;
	raw_de = (struct iso_directory_record *)pnt;

	/*
	 * If we go past the end of the buffer, there is some sort of error.
	 */
	if (offset + *pnt > bufsize)
		goto out_bad_span;

	/*
	 * Now test for possible Rock Ridge extensions which will override
	 * some of these numbers in the inode structure.
	 */
	setup_rock_ridge(raw_de, inode, &rs);

repeat:
	while (rs.len > 2) { /* There may be one byte for padding somewhere */
		rr = (struct rock_ridge *)rs.chr;
		if (rr->len < 3)
			goto out;	/* Something got screwed up here */
		sig = isonum_721(rs.chr);
		if (rock_check_overflow(&rs, sig))
			goto out;
		rs.chr += rr->len;
		rs.len -= rr->len;
		if (rs.len < 0)
			goto out;	/* corrupted isofs */

		switch (sig) {
		case SIG('R', 'R'):
			if ((rr->u.RR.flags[0] & RR_SL) == 0)
				goto out;
			break;
		case SIG('S', 'P'):
			if (check_sp(rr, inode))
				goto out;
			break;
		case SIG('S', 'L'):
			/* copy into the page, bounded one byte short of
			 * PAGE_SIZE to leave room for the NUL */
			rpnt = get_symlink_chunk(rpnt, rr,
						 link + (PAGE_SIZE - 1));
			if (rpnt == NULL)
				goto out;
			break;
		case SIG('C', 'E'):
			/* This tells is if there is a continuation record */
			rs.cont_extent = isonum_733(rr->u.CE.extent);
			rs.cont_offset = isonum_733(rr->u.CE.offset);
			rs.cont_size = isonum_733(rr->u.CE.size);
			/* fall through */
		default:
			break;
		}
	}
	ret = rock_continue(&rs);
	if (ret == 0)
		goto repeat;
	if (ret < 0)
		goto fail;

	if (rpnt == link)
		goto fail;	/* no SL data found at all */
	brelse(bh);
	*rpnt = '\0';
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);
	return 0;

	/* error exit from macro */
out:
	kfree(rs.buffer);
	goto fail;
out_noread:
	printk("unable to read i-node block");
	goto fail;
out_bad_span:
	printk("symlink spans iso9660 blocks\n");
fail:
	brelse(bh);
error:
	SetPageError(page);
	kunmap(page);
	unlock_page(page);
	return -EIO;
}

const struct address_space_operations isofs_symlink_aops = {
	.readpage = rock_ridge_symlink_readpage
};
./CrossVul/dataset_final_sorted/CWE-20/c/good_2391_0
crossvul-cpp_data_bad_5845_23
/* * Copyright (C) 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #define pr_fmt(fmt) "llcp: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/nfc.h> #include "nfc.h" #include "llcp.h" static int sock_wait_state(struct sock *sk, int state, unsigned long timeo) { DECLARE_WAITQUEUE(wait, current); int err = 0; pr_debug("sk %p", sk); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); while (sk->sk_state != state) { if (!timeo) { err = -EINPROGRESS; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } static struct proto llcp_sock_proto = { .name = "NFC_LLCP", .owner = THIS_MODULE, .obj_size = sizeof(struct nfc_llcp_sock), }; static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct nfc_llcp_local *local; struct nfc_dev *dev; struct sockaddr_nfc_llcp llcp_addr; int len, ret = 0; if (!addr || addr->sa_family != AF_NFC) return 
-EINVAL; pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); memset(&llcp_addr, 0, sizeof(llcp_addr)); len = min_t(unsigned int, sizeof(llcp_addr), alen); memcpy(&llcp_addr, addr, len); /* This is going to be a listening socket, dsap must be 0 */ if (llcp_addr.dsap != 0) return -EINVAL; lock_sock(sk); if (sk->sk_state != LLCP_CLOSED) { ret = -EBADFD; goto error; } dev = nfc_get_device(llcp_addr.dev_idx); if (dev == NULL) { ret = -ENODEV; goto error; } local = nfc_llcp_find_local(dev); if (local == NULL) { ret = -ENODEV; goto put_dev; } llcp_sock->dev = dev; llcp_sock->local = nfc_llcp_local_get(local); llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; llcp_sock->service_name_len = min_t(unsigned int, llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME); llcp_sock->service_name = kmemdup(llcp_addr.service_name, llcp_sock->service_name_len, GFP_KERNEL); llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); if (llcp_sock->ssap == LLCP_SAP_MAX) { ret = -EADDRINUSE; goto put_dev; } llcp_sock->reserved_ssap = llcp_sock->ssap; nfc_llcp_sock_link(&local->sockets, sk); pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap); sk->sk_state = LLCP_BOUND; put_dev: nfc_put_device(dev); error: release_sock(sk); return ret; } static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct nfc_llcp_local *local; struct nfc_dev *dev; struct sockaddr_nfc_llcp llcp_addr; int len, ret = 0; if (!addr || addr->sa_family != AF_NFC) return -EINVAL; pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); memset(&llcp_addr, 0, sizeof(llcp_addr)); len = min_t(unsigned int, sizeof(llcp_addr), alen); memcpy(&llcp_addr, addr, len); lock_sock(sk); if (sk->sk_state != LLCP_CLOSED) { ret = -EBADFD; goto error; } dev = nfc_get_device(llcp_addr.dev_idx); if (dev == NULL) { ret = -ENODEV; goto error; } local = nfc_llcp_find_local(dev); if (local == NULL) { ret = 
-ENODEV;
        goto put_dev;
    }

    llcp_sock->dev = dev;
    llcp_sock->local = nfc_llcp_local_get(local);
    llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;

    nfc_llcp_sock_link(&local->raw_sockets, sk);

    sk->sk_state = LLCP_BOUND;

put_dev:
    nfc_put_device(dev);

error:
    release_sock(sk);
    return ret;
}

/*
 * Move a bound SEQPACKET/STREAM LLCP socket into the listening state.
 * Only the backlog bookkeeping is updated; incoming connections are
 * queued by the accept-queue helpers below.
 */
static int llcp_sock_listen(struct socket *sock, int backlog)
{
    struct sock *sk = sock->sk;
    int ret = 0;

    pr_debug("sk %p backlog %d\n", sk, backlog);

    lock_sock(sk);

    /* Listening only makes sense on a bound, connection-oriented socket. */
    if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) ||
        sk->sk_state != LLCP_BOUND) {
        ret = -EBADFD;
        goto error;
    }

    sk->sk_max_ack_backlog = backlog;
    sk->sk_ack_backlog = 0;

    pr_debug("Socket listening\n");
    sk->sk_state = LLCP_LISTEN;

error:
    release_sock(sk);

    return ret;
}

/*
 * Set per-socket LLCP link parameters (receive window and MIUX).
 * Both may only be changed while the socket is not bound, listening or
 * connected, since they are negotiated at connection setup.
 */
static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, unsigned int optlen)
{
    struct sock *sk = sock->sk;
    struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
    u32 opt;
    int err = 0;

    pr_debug("%p optname %d\n", sk, optname);

    if (level != SOL_NFC)
        return -ENOPROTOOPT;

    lock_sock(sk);

    switch (optname) {
    case NFC_LLCP_RW:
        if (sk->sk_state == LLCP_CONNECTED ||
            sk->sk_state == LLCP_BOUND ||
            sk->sk_state == LLCP_LISTEN) {
            err = -EINVAL;
            break;
        }

        if (get_user(opt, (u32 __user *) optval)) {
            err = -EFAULT;
            break;
        }

        if (opt > LLCP_MAX_RW) {
            err = -EINVAL;
            break;
        }

        llcp_sock->rw = (u8) opt;

        break;

    case NFC_LLCP_MIUX:
        if (sk->sk_state == LLCP_CONNECTED ||
            sk->sk_state == LLCP_BOUND ||
            sk->sk_state == LLCP_LISTEN) {
            err = -EINVAL;
            break;
        }

        if (get_user(opt, (u32 __user *) optval)) {
            err = -EFAULT;
            break;
        }

        if (opt > LLCP_MAX_MIUX) {
            err = -EINVAL;
            break;
        }

        /* MIUX is carried big-endian in the LLCP parameter TLV. */
        llcp_sock->miux = cpu_to_be16((u16) opt);

        break;

    default:
        err = -ENOPROTOOPT;
        break;
    }

    release_sock(sk);

    pr_debug("%p rw %d miux %d\n", llcp_sock,
             llcp_sock->rw, llcp_sock->miux);

    return err;
}

/*
 * Read back LLCP parameters.  Values above their legal maximum mean
 * "unset"; in that case the local (link-wide) value is reported instead.
 */
static int nfc_llcp_getsockopt(struct socket *sock, int level, int optname,
                               char __user *optval, int __user *optlen)
{
    struct nfc_llcp_local *local;
    struct sock *sk = sock->sk;
    struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
    int len, err = 0;
    u16 miux, remote_miu;
    u8 rw;

    pr_debug("%p optname %d\n", sk, optname);

    if (level != SOL_NFC)
        return -ENOPROTOOPT;

    if (get_user(len, optlen))
        return -EFAULT;

    local = llcp_sock->local;
    if (!local)
        return -ENODEV;

    len = min_t(u32, len, sizeof(u32));

    lock_sock(sk);

    switch (optname) {
    case NFC_LLCP_RW:
        rw = llcp_sock->rw > LLCP_MAX_RW ? local->rw : llcp_sock->rw;
        if (put_user(rw, (u32 __user *) optval))
            err = -EFAULT;

        break;

    case NFC_LLCP_MIUX:
        miux = be16_to_cpu(llcp_sock->miux) > LLCP_MAX_MIUX ?
                be16_to_cpu(local->miux) : be16_to_cpu(llcp_sock->miux);

        if (put_user(miux, (u32 __user *) optval))
            err = -EFAULT;

        break;

    case NFC_LLCP_REMOTE_MIU:
        remote_miu = llcp_sock->remote_miu > LLCP_MAX_MIU ?
                        local->remote_miu : llcp_sock->remote_miu;

        if (put_user(remote_miu, (u32 __user *) optval))
            err = -EFAULT;

        break;

    case NFC_LLCP_REMOTE_LTO:
        /* remote_lto is stored in 10ms units — report milliseconds/100?
         * NOTE(review): the /10 conversion mirrors the TLV encoding;
         * confirm against nfc_llcp_parse_gb_tlv() before changing. */
        if (put_user(local->remote_lto / 10, (u32 __user *) optval))
            err = -EFAULT;

        break;

    case NFC_LLCP_REMOTE_RW:
        if (put_user(llcp_sock->remote_rw, (u32 __user *) optval))
            err = -EFAULT;

        break;

    default:
        err = -ENOPROTOOPT;
        break;
    }

    release_sock(sk);

    if (put_user(len, optlen))
        return -EFAULT;

    return err;
}

/*
 * Remove a child socket from its parent's accept queue and drop the
 * reference taken by nfc_llcp_accept_enqueue().
 */
void nfc_llcp_accept_unlink(struct sock *sk)
{
    struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);

    pr_debug("state %d\n", sk->sk_state);

    list_del_init(&llcp_sock->accept_queue);
    sk_acceptq_removed(llcp_sock->parent);
    llcp_sock->parent = NULL;

    sock_put(sk);
}

/*
 * Queue a freshly created child socket on the listening parent so a
 * later accept() can pick it up.  Holds a reference on the child.
 */
void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk)
{
    struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
    struct nfc_llcp_sock *llcp_sock_parent = nfc_llcp_sock(parent);

    /* Lock will be free from unlink */
    sock_hold(sk);

    list_add_tail(&llcp_sock->accept_queue,
                  &llcp_sock_parent->accept_queue);
    llcp_sock->parent = parent;
    sk_acceptq_added(parent);
}

/*
 * Pull the first usable child socket off the parent's accept queue.
 * Closed children are unlinked in passing; a connected child (or any
 * child when newsock is NULL) is returned grafted onto newsock.
 */
struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
                                     struct socket *newsock)
{
    struct nfc_llcp_sock *lsk, *n, *llcp_parent;
    struct sock *sk;

    llcp_parent =
nfc_llcp_sock(parent);

    list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue,
                             accept_queue) {
        sk = &lsk->sk;
        lock_sock(sk);

        /* Drop children that died while waiting on the queue. */
        if (sk->sk_state == LLCP_CLOSED) {
            release_sock(sk);
            nfc_llcp_accept_unlink(sk);
            continue;
        }

        if (sk->sk_state == LLCP_CONNECTED || !newsock) {
            list_del_init(&lsk->accept_queue);
            sock_put(sk);

            if (newsock)
                sock_graft(sk, newsock);

            release_sock(sk);

            pr_debug("Returning sk state %d\n", sk->sk_state);

            sk_acceptq_removed(parent);

            return sk;
        }

        release_sock(sk);
    }

    return NULL;
}

/*
 * accept() entry point: block (honoring the socket's receive timeout and
 * pending signals) until a child connection becomes available on the
 * listening socket, then hand it over to newsock.
 */
static int llcp_sock_accept(struct socket *sock, struct socket *newsock,
                            int flags)
{
    DECLARE_WAITQUEUE(wait, current);
    struct sock *sk = sock->sk, *new_sk;
    long timeo;
    int ret = 0;

    pr_debug("parent %p\n", sk);

    lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

    if (sk->sk_state != LLCP_LISTEN) {
        ret = -EBADFD;
        goto error;
    }

    timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

    /* Wait for an incoming connection. */
    add_wait_queue_exclusive(sk_sleep(sk), &wait);
    while (!(new_sk = nfc_llcp_accept_dequeue(sk, newsock))) {
        set_current_state(TASK_INTERRUPTIBLE);

        if (!timeo) {
            ret = -EAGAIN;
            break;
        }

        if (signal_pending(current)) {
            ret = sock_intr_errno(timeo);
            break;
        }

        /* Drop the parent lock while sleeping so children can queue. */
        release_sock(sk);
        timeo = schedule_timeout(timeo);
        lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
    }
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(sk_sleep(sk), &wait);

    if (ret)
        goto error;

    newsock->state = SS_CONNECTED;

    pr_debug("new socket %p\n", new_sk);

error:
    release_sock(sk);

    return ret;
}

/*
 * Report the socket's LLCP address (device, target, SAPs and service
 * name).  Fails with -EBADFD when the socket was never bound/connected
 * to a device.
 */
static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
                             int *len, int peer)
{
    struct sock *sk = sock->sk;
    struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
    DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, llcp_addr, uaddr);

    if (llcp_sock == NULL || llcp_sock->dev == NULL)
        return -EBADFD;

    pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
             llcp_sock->dsap, llcp_sock->ssap);

    memset(llcp_addr, 0, sizeof(*llcp_addr));
    *len = sizeof(struct sockaddr_nfc_llcp);

    llcp_addr->sa_family = AF_NFC;
    llcp_addr->dev_idx =
llcp_sock->dev->idx;
    llcp_addr->target_idx = llcp_sock->target_idx;
    llcp_addr->nfc_protocol = llcp_sock->nfc_protocol;
    llcp_addr->dsap = llcp_sock->dsap;
    llcp_addr->ssap = llcp_sock->ssap;
    llcp_addr->service_name_len = llcp_sock->service_name_len;
    memcpy(llcp_addr->service_name, llcp_sock->service_name,
           llcp_addr->service_name_len);

    return 0;
}

/*
 * poll() helper for listening sockets: readable as soon as a connected
 * child is sitting on the accept queue.
 */
static inline unsigned int llcp_accept_poll(struct sock *parent)
{
    struct nfc_llcp_sock *llcp_sock, *n, *parent_sock;
    struct sock *sk;

    parent_sock = nfc_llcp_sock(parent);

    list_for_each_entry_safe(llcp_sock, n, &parent_sock->accept_queue,
                             accept_queue) {
        sk = &llcp_sock->sk;

        if (sk->sk_state == LLCP_CONNECTED)
            return POLLIN | POLLRDNORM;
    }

    return 0;
}

/*
 * poll() entry point.  Builds the event mask from the receive queue,
 * socket state and shutdown flags; writability additionally requires a
 * connected state.
 */
static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
                                   poll_table *wait)
{
    struct sock *sk = sock->sk;
    unsigned int mask = 0;

    pr_debug("%p\n", sk);

    sock_poll_wait(file, sk_sleep(sk), wait);

    if (sk->sk_state == LLCP_LISTEN)
        return llcp_accept_poll(sk);

    if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
        mask |= POLLERR |
            (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ?
POLLPRI : 0);

    if (!skb_queue_empty(&sk->sk_receive_queue))
        mask |= POLLIN | POLLRDNORM;

    if (sk->sk_state == LLCP_CLOSED)
        mask |= POLLHUP;

    if (sk->sk_shutdown & RCV_SHUTDOWN)
        mask |= POLLRDHUP | POLLIN | POLLRDNORM;

    if (sk->sk_shutdown == SHUTDOWN_MASK)
        mask |= POLLHUP;

    if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
        mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
    else
        set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

    pr_debug("mask 0x%x\n", mask);

    return mask;
}

/*
 * release() entry point: send DISC for a connected socket, tear down any
 * pending children on a listening socket, give the reserved SSAP back
 * and orphan the sock.
 */
static int llcp_sock_release(struct socket *sock)
{
    struct sock *sk = sock->sk;
    struct nfc_llcp_local *local;
    struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
    int err = 0;

    if (!sk)
        return 0;

    pr_debug("%p\n", sk);

    local = llcp_sock->local;
    if (local == NULL) {
        err = -ENODEV;
        goto out;
    }

    lock_sock(sk);

    /* Send a DISC */
    if (sk->sk_state == LLCP_CONNECTED)
        nfc_llcp_send_disconnect(llcp_sock);

    if (sk->sk_state == LLCP_LISTEN) {
        struct nfc_llcp_sock *lsk, *n;
        struct sock *accept_sk;

        /* Disconnect and unlink every not-yet-accepted child. */
        list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
                                 accept_queue) {
            accept_sk = &lsk->sk;
            lock_sock(accept_sk);

            nfc_llcp_send_disconnect(lsk);
            nfc_llcp_accept_unlink(accept_sk);

            release_sock(accept_sk);
        }
    }

    if (llcp_sock->reserved_ssap < LLCP_SAP_MAX)
        nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);

    release_sock(sk);

    /* Keep this sock alive and therefore do not remove it from the sockets
     * list until the DISC PDU has been actually sent. Otherwise we would
     * reply with DM PDUs before sending the DISC one.
*/ if (sk->sk_state == LLCP_DISCONNECTING) return err; if (sock->type == SOCK_RAW) nfc_llcp_sock_unlink(&local->raw_sockets, sk); else nfc_llcp_sock_unlink(&local->sockets, sk); out: sock_orphan(sk); sock_put(sk); return err; } static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, int len, int flags) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct sockaddr_nfc_llcp *addr = (struct sockaddr_nfc_llcp *)_addr; struct nfc_dev *dev; struct nfc_llcp_local *local; int ret = 0; pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); if (!addr || len < sizeof(struct sockaddr_nfc) || addr->sa_family != AF_NFC) return -EINVAL; if (addr->service_name_len == 0 && addr->dsap == 0) return -EINVAL; pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, addr->target_idx, addr->nfc_protocol); lock_sock(sk); if (sk->sk_state == LLCP_CONNECTED) { ret = -EISCONN; goto error; } dev = nfc_get_device(addr->dev_idx); if (dev == NULL) { ret = -ENODEV; goto error; } local = nfc_llcp_find_local(dev); if (local == NULL) { ret = -ENODEV; goto put_dev; } device_lock(&dev->dev); if (dev->dep_link_up == false) { ret = -ENOLINK; device_unlock(&dev->dev); goto put_dev; } device_unlock(&dev->dev); if (local->rf_mode == NFC_RF_INITIATOR && addr->target_idx != local->target_idx) { ret = -ENOLINK; goto put_dev; } llcp_sock->dev = dev; llcp_sock->local = nfc_llcp_local_get(local); llcp_sock->remote_miu = llcp_sock->local->remote_miu; llcp_sock->ssap = nfc_llcp_get_local_ssap(local); if (llcp_sock->ssap == LLCP_SAP_MAX) { ret = -ENOMEM; goto put_dev; } llcp_sock->reserved_ssap = llcp_sock->ssap; if (addr->service_name_len == 0) llcp_sock->dsap = addr->dsap; else llcp_sock->dsap = LLCP_SAP_SDP; llcp_sock->nfc_protocol = addr->nfc_protocol; llcp_sock->service_name_len = min_t(unsigned int, addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME); llcp_sock->service_name = kmemdup(addr->service_name, llcp_sock->service_name_len, 
GFP_KERNEL); nfc_llcp_sock_link(&local->connecting_sockets, sk); ret = nfc_llcp_send_connect(llcp_sock); if (ret) goto sock_unlink; sk->sk_state = LLCP_CONNECTING; ret = sock_wait_state(sk, LLCP_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); if (ret && ret != -EINPROGRESS) goto sock_unlink; release_sock(sk); return ret; sock_unlink: nfc_llcp_put_ssap(local, llcp_sock->ssap); nfc_llcp_sock_unlink(&local->connecting_sockets, sk); put_dev: nfc_put_device(dev); error: release_sock(sk); return ret; } static int llcp_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); int ret; pr_debug("sock %p sk %p", sock, sk); ret = sock_error(sk); if (ret) return ret; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); if (sk->sk_type == SOCK_DGRAM) { struct sockaddr_nfc_llcp *addr = (struct sockaddr_nfc_llcp *)msg->msg_name; if (msg->msg_namelen < sizeof(*addr)) { release_sock(sk); return -EINVAL; } release_sock(sk); return nfc_llcp_send_ui_frame(llcp_sock, addr->dsap, addr->ssap, msg, len); } if (sk->sk_state != LLCP_CONNECTED) { release_sock(sk); return -ENOTCONN; } release_sock(sk); return nfc_llcp_send_i_frame(llcp_sock, msg, len); } static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; unsigned int copied, rlen; struct sk_buff *skb, *cskb; int err = 0; pr_debug("%p %zu\n", sk, len); msg->msg_namelen = 0; lock_sock(sk); if (sk->sk_state == LLCP_CLOSED && skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); return 0; } release_sock(sk); if (flags & (MSG_OOB)) return -EOPNOTSUPP; skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { pr_err("Recv datagram failed state %d %d %d", sk->sk_state, err, sock_error(sk)); if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; return err; } rlen = skb->len; /* real length of skb 
*/
    copied = min_t(unsigned int, rlen, len);

    cskb = skb;
    if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
        /* On copy failure, re-queue the skb so the data isn't lost. */
        if (!(flags & MSG_PEEK))
            skb_queue_head(&sk->sk_receive_queue, skb);
        return -EFAULT;
    }

    sock_recv_timestamp(msg, sk, skb);

    if (sk->sk_type == SOCK_DGRAM && msg->msg_name) {
        struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb);
        struct sockaddr_nfc_llcp *sockaddr =
            (struct sockaddr_nfc_llcp *) msg->msg_name;

        msg->msg_namelen = sizeof(struct sockaddr_nfc_llcp);

        pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap);

        /* Zero the whole sockaddr first so no kernel stack leaks out. */
        memset(sockaddr, 0, sizeof(*sockaddr));
        sockaddr->sa_family = AF_NFC;
        sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;
        sockaddr->dsap = ui_cb->dsap;
        sockaddr->ssap = ui_cb->ssap;
    }

    /* Mark read part of skb as used */
    if (!(flags & MSG_PEEK)) {
        /* SOCK_STREAM: re-queue skb if it contains unreceived data */
        if (sk->sk_type == SOCK_STREAM ||
            sk->sk_type == SOCK_DGRAM ||
            sk->sk_type == SOCK_RAW) {
            skb_pull(skb, copied);
            if (skb->len) {
                skb_queue_head(&sk->sk_receive_queue, skb);
                goto done;
            }
        }

        kfree_skb(skb);
    }

    /* XXX Queue backlogged skbs */

done:
    /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
    if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
        copied = rlen;

    return copied;
}

/* proto_ops for connection-oriented and datagram LLCP sockets. */
static const struct proto_ops llcp_sock_ops = {
    .family      = PF_NFC,
    .owner       = THIS_MODULE,
    .bind        = llcp_sock_bind,
    .connect     = llcp_sock_connect,
    .release     = llcp_sock_release,
    .socketpair  = sock_no_socketpair,
    .accept      = llcp_sock_accept,
    .getname     = llcp_sock_getname,
    .poll        = llcp_sock_poll,
    .ioctl       = sock_no_ioctl,
    .listen      = llcp_sock_listen,
    .shutdown    = sock_no_shutdown,
    .setsockopt  = nfc_llcp_setsockopt,
    .getsockopt  = nfc_llcp_getsockopt,
    .sendmsg     = llcp_sock_sendmsg,
    .recvmsg     = llcp_sock_recvmsg,
    .mmap        = sock_no_mmap,
};

/* proto_ops for raw (sniffing) LLCP sockets: receive-only. */
static const struct proto_ops llcp_rawsock_ops = {
    .family      = PF_NFC,
    .owner       = THIS_MODULE,
    .bind        = llcp_raw_sock_bind,
    .connect     = sock_no_connect,
    .release     = llcp_sock_release,
    .socketpair  = sock_no_socketpair,
    .accept      =
sock_no_accept,
    .getname     = llcp_sock_getname,
    .poll        = llcp_sock_poll,
    .ioctl       = sock_no_ioctl,
    .listen      = sock_no_listen,
    .shutdown    = sock_no_shutdown,
    .setsockopt  = sock_no_setsockopt,
    .getsockopt  = sock_no_getsockopt,
    .sendmsg     = sock_no_sendmsg,
    .recvmsg     = llcp_sock_recvmsg,
    .mmap        = sock_no_mmap,
};

/*
 * sk_destruct callback: free LLCP-private state once the sock is dead.
 * The device reference is only held while connected.
 */
static void llcp_sock_destruct(struct sock *sk)
{
    struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);

    pr_debug("%p\n", sk);

    if (sk->sk_state == LLCP_CONNECTED)
        nfc_put_device(llcp_sock->dev);

    skb_queue_purge(&sk->sk_receive_queue);

    nfc_llcp_sock_free(llcp_sock);

    if (!sock_flag(sk, SOCK_DEAD)) {
        pr_err("Freeing alive NFC LLCP socket %p\n", sk);
        return;
    }
}

/*
 * Allocate and initialize a new LLCP sock.  rw/miux start one past their
 * maxima, which the getsockopt path interprets as "unset — use the
 * link-wide default".
 */
struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
{
    struct sock *sk;
    struct nfc_llcp_sock *llcp_sock;

    sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto);
    if (!sk)
        return NULL;

    llcp_sock = nfc_llcp_sock(sk);

    sock_init_data(sock, sk);
    sk->sk_state = LLCP_CLOSED;
    sk->sk_protocol = NFC_SOCKPROTO_LLCP;
    sk->sk_type = type;
    sk->sk_destruct = llcp_sock_destruct;

    llcp_sock->ssap = 0;
    llcp_sock->dsap = LLCP_SAP_SDP;
    llcp_sock->rw = LLCP_MAX_RW + 1;
    llcp_sock->miux = cpu_to_be16(LLCP_MAX_MIUX + 1);
    llcp_sock->send_n = llcp_sock->send_ack_n = 0;
    llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
    llcp_sock->remote_ready = 1;
    llcp_sock->reserved_ssap = LLCP_SAP_MAX;
    nfc_llcp_socket_remote_param_init(llcp_sock);
    skb_queue_head_init(&llcp_sock->tx_queue);
    skb_queue_head_init(&llcp_sock->tx_pending_queue);
    INIT_LIST_HEAD(&llcp_sock->accept_queue);

    if (sock != NULL)
        sock->state = SS_UNCONNECTED;

    return sk;
}

/* Release everything nfc_llcp_sock_alloc()/bind/connect attached. */
void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
{
    kfree(sock->service_name);

    skb_queue_purge(&sock->tx_queue);
    skb_queue_purge(&sock->tx_pending_queue);

    list_del_init(&sock->accept_queue);

    sock->parent = NULL;

    nfc_llcp_local_put(sock->local);
}

/*
 * socket(2) backend for NFC_SOCKPROTO_LLCP: pick the ops table matching
 * the socket type and allocate the sock.
 */
static int llcp_sock_create(struct net *net, struct socket *sock,
                            const struct nfc_protocol *nfc_proto)
{
    struct sock *sk;

    pr_debug("%p\n", sock);

    if
(sock->type != SOCK_STREAM &&
        sock->type != SOCK_DGRAM &&
        sock->type != SOCK_RAW)
        return -ESOCKTNOSUPPORT;

    if (sock->type == SOCK_RAW)
        sock->ops = &llcp_rawsock_ops;
    else
        sock->ops = &llcp_sock_ops;

    sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC);
    if (sk == NULL)
        return -ENOMEM;

    return 0;
}

static const struct nfc_protocol llcp_nfc_proto = {
    .id       = NFC_SOCKPROTO_LLCP,
    .proto    = &llcp_sock_proto,
    .owner    = THIS_MODULE,
    .create   = llcp_sock_create
};

/* Register the LLCP socket protocol with the NFC core. */
int __init nfc_llcp_sock_init(void)
{
    return nfc_proto_register(&llcp_nfc_proto);
}

void nfc_llcp_sock_exit(void)
{
    nfc_proto_unregister(&llcp_nfc_proto);
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5845_23
crossvul-cpp_data_good_190_0
/*
 * MPEG-4 encoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "h263.h"
#include "mpeg4video.h"

/* The uni_DCtab_* tables below contain unified bits+length tables to encode DC
 * differences in MPEG-4. Unified in the sense that the specification specifies
 * this encoding in several steps. */
static uint8_t  uni_DCtab_lum_len[512];
static uint8_t  uni_DCtab_chrom_len[512];
static uint16_t uni_DCtab_lum_bits[512];
static uint16_t uni_DCtab_chrom_bits[512];

/* Unified encoding tables for run length encoding of coefficients.
 * Unified in the sense that the specification specifies the encoding in several steps.
*/
static uint32_t uni_mpeg4_intra_rl_bits[64 * 64 * 2 * 2];
static uint8_t  uni_mpeg4_intra_rl_len[64 * 64 * 2 * 2];
static uint32_t uni_mpeg4_inter_rl_bits[64 * 64 * 2 * 2];
static uint8_t  uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];

/* Alternative index layouts that were tried; kept for reference. */
//#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 + (run) * 256 + (level))
//#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) + (level) * 64)
#define UNI_MPEG4_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))

/* MPEG-4
 * inter
 * max level: 24/6
 * max run: 53/63
 *
 * intra
 * max level: 53/16
 * max run: 29/41
 */

/**
 * Return the number of bits that encoding the 8x8 block in block would need.
 * @param[in]  block_last_index last index in scantable order that refers to a non zero element in block.
 */
static inline int get_block_rate(MpegEncContext *s, int16_t block[64],
                                 int block_last_index, uint8_t scantable[64])
{
    int last = 0;
    int j;
    int rate = 0;

    for (j = 1; j <= block_last_index; j++) {
        const int index = scantable[j];
        int level = block[index];
        if (level) {
            /* Bias into the unified VLC table range; levels outside
             * [-64,63] need the escape code. */
            level += 64;
            if ((level & (~127)) == 0) {
                if (j < block_last_index)
                    rate += s->intra_ac_vlc_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
                else
                    rate += s->intra_ac_vlc_last_length[UNI_AC_ENC_INDEX(j - last - 1, level)];
            } else
                rate += s->ac_esc_length;

            last = j;
        }
    }

    return rate;
}

/**
 * Restore the ac coefficients in block that have been changed by decide_ac_pred().
 * This function also restores s->block_last_index.
* @param[in,out] block MB coefficients, these will be restored
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64],
                                     const int dir[6], uint8_t *st[6],
                                     const int zigzag_last_index[6])
{
    int i, n;
    memcpy(s->block_last_index, zigzag_last_index, sizeof(int) * 6);

    for (n = 0; n < 6; n++) {
        /* ac_val holds the 15 predicted AC coefficients for this block. */
        int16_t *ac_val = s->ac_val[0][0] + s->block_index[n] * 16;

        st[n] = s->intra_scantable.permutated;
        if (dir[n]) {
            /* top prediction */
            for (i = 1; i < 8; i++)
                block[n][s->idsp.idct_permutation[i]] = ac_val[i + 8];
        } else {
            /* left prediction */
            for (i = 1; i < 8; i++)
                block[n][s->idsp.idct_permutation[i << 3]] = ac_val[i];
        }
    }
}

/**
 * Return the optimal value (0 or 1) for the ac_pred element for the given MB in MPEG-4.
 * This function will also update s->block_last_index and s->ac_val.
 * @param[in,out] block MB coefficients, these will be updated if 1 is returned
 * @param[in] dir ac prediction direction for each 8x8 block
 * @param[out] st scantable for each 8x8 block
 * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
 */
static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
                                 const int dir[6], uint8_t *st[6],
                                 int zigzag_last_index[6])
{
    /* score < 0 means AC prediction saves bits and should be used. */
    int score = 0;
    int i, n;
    int8_t *const qscale_table = s->current_picture.qscale_table;

    memcpy(zigzag_last_index, s->block_last_index, sizeof(int) * 6);

    for (n = 0; n < 6; n++) {
        int16_t *ac_val, *ac_val1;

        score -= get_block_rate(s, block[n], s->block_last_index[n],
                                s->intra_scantable.permutated);

        ac_val  = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val1 = ac_val;
        if (dir[n]) {
            const int xy = s->mb_x + s->mb_y * s->mb_stride - s->mb_stride;
            /* top prediction */
            ac_val -= s->block_wrap[n] * 16;
            if (s->mb_y == 0 || s->qscale == qscale_table[xy] ||
                n == 2 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i]];
                    block[n][s->idsp.idct_permutation[i]] = level - ac_val[i + 8];
                    ac_val1[i]     = block[n][s->idsp.idct_permutation[i << 3]];
                    ac_val1[i + 8] = level;
                }
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i]];
                    block[n][s->idsp.idct_permutation[i]] = level -
                        ROUNDED_DIV(ac_val[i + 8] * qscale_table[xy], s->qscale);
                    ac_val1[i]     = block[n][s->idsp.idct_permutation[i << 3]];
                    ac_val1[i + 8] = level;
                }
            }
            st[n] = s->intra_h_scantable.permutated;
        } else {
            const int xy = s->mb_x - 1 + s->mb_y * s->mb_stride;
            /* left prediction */
            ac_val -= 16;
            if (s->mb_x == 0 || s->qscale == qscale_table[xy] ||
                n == 1 || n == 3) {
                /* same qscale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i << 3]];
                    block[n][s->idsp.idct_permutation[i << 3]] = level - ac_val[i];
                    ac_val1[i]     = level;
                    ac_val1[i + 8] =
block[n][s->idsp.idct_permutation[i]];
                }
            } else {
                /* different qscale, we must rescale */
                for (i = 1; i < 8; i++) {
                    const int level = block[n][s->idsp.idct_permutation[i << 3]];
                    block[n][s->idsp.idct_permutation[i << 3]] = level -
                        ROUNDED_DIV(ac_val[i] * qscale_table[xy], s->qscale);
                    ac_val1[i]     = level;
                    ac_val1[i + 8] = block[n][s->idsp.idct_permutation[i]];
                }
            }
            st[n] = s->intra_v_scantable.permutated;
        }

        for (i = 63; i > 0; i--)  // FIXME optimize
            if (block[n][st[n][i]])
                break;
        s->block_last_index[n] = i;

        score += get_block_rate(s, block[n], s->block_last_index[n], st[n]);
    }

    if (score < 0) {
        return 1;
    } else {
        restore_ac_coeffs(s, block, dir, st, zigzag_last_index);
        return 0;
    }
}

/**
 * modify mb_type & qscale so that encoding is actually possible in MPEG-4
 */
void ff_clean_mpeg4_qscales(MpegEncContext *s)
{
    int i;
    int8_t *const qscale_table = s->current_picture.qscale_table;

    ff_clean_h263_qscales(s);

    if (s->pict_type == AV_PICTURE_TYPE_B) {
        int odd = 0;
        /* ok, come on, this isn't funny anymore, there's more code for
         * handling this MPEG-4 mess than for the actual adaptive quantization */

        /* B-frame dquant is restricted to even deltas, so force all
         * qscales to a single parity (the majority one). */
        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            odd += qscale_table[mb_xy] & 1;
        }

        if (2 * odd > s->mb_num)
            odd = 1;
        else
            odd = 0;

        for (i = 0; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if ((qscale_table[mb_xy] & 1) != odd)
                qscale_table[mb_xy]++;
            if (qscale_table[mb_xy] > 31)
                qscale_table[mb_xy] = 31;
        }

        for (i = 1; i < s->mb_num; i++) {
            int mb_xy = s->mb_index2xy[i];
            if (qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i - 1]] &&
                (s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_DIRECT)) {
                s->mb_type[mb_xy] |= CANDIDATE_MB_TYPE_BIDIR;
            }
        }
    }
}

/**
 * Encode the dc value.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_dc(PutBitContext *s, int level, int n)
{
    /* DC will overflow if level is outside the [-255,255] range.
*/
    level += 256;
    if (n < 4) {
        /* luminance */
        put_bits(s, uni_DCtab_lum_len[level], uni_DCtab_lum_bits[level]);
    } else {
        /* chrominance */
        put_bits(s, uni_DCtab_chrom_len[level], uni_DCtab_chrom_bits[level]);
    }
}

/* Return the bit cost of encoding the given DC difference (no output). */
static inline int mpeg4_get_dc_length(int level, int n)
{
    if (n < 4)
        return uni_DCtab_lum_len[level + 256];
    else
        return uni_DCtab_chrom_len[level + 256];
}

/**
 * Encode an 8x8 block.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
static inline void mpeg4_encode_block(MpegEncContext *s,
                                      int16_t *block, int n, int intra_dc,
                                      uint8_t *scan_table, PutBitContext *dc_pb,
                                      PutBitContext *ac_pb)
{
    int i, last_non_zero;
    uint32_t *bits_tab;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];

    if (s->mb_intra) {  // Note gcc (3.2.1 at least) will optimize this away
        /* MPEG-4 based DC predictor */
        mpeg4_encode_dc(dc_pb, intra_dc, n);
        if (last_index < 1)
            return;
        i = 1;
        bits_tab = uni_mpeg4_intra_rl_bits;
        len_tab  = uni_mpeg4_intra_rl_len;
    } else {
        if (last_index < 0)
            return;
        i = 0;
        bits_tab = uni_mpeg4_inter_rl_bits;
        len_tab  = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[scan_table[i]];
        if (level) {
            int run = i - last_non_zero - 1;
            level += 64;
            if ((level & (~127)) == 0) {
                /* |level| <= 63: plain (run,level) VLC, last=0 */
                const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
                put_bits(ac_pb, len_tab[index], bits_tab[index]);
            } else {  // ESC3
                put_bits(ac_pb,
                         7 + 2 + 1 + 6 + 1 + 12 + 1,
                         (3 << 23) + (3 << 21) + (0 << 20) + (run << 14) +
                         (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
            }
            last_non_zero = i;
        }
    }
    /* if (i <= last_index) */ {
        /* The final coefficient is emitted with the last=1 tables. */
        int level = block[scan_table[i]];
        int run   = i - last_non_zero - 1;
        level += 64;
        if ((level & (~127)) == 0) {
            const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
            put_bits(ac_pb, len_tab[index], bits_tab[index]);
        } else {  // ESC3
            put_bits(ac_pb,
                     7 + 2 + 1 + 6 + 1 + 12 + 1,
                     (3 << 23) + (3 << 21) + (1 << 20) + (run << 14) +
                     (1 << 13) + (((level - 64) & 0xfff) << 1) + 1);
        }
    }
}

/* Bit-cost counterpart of mpeg4_encode_block(): same traversal, no output. */
static int
mpeg4_get_block_length(MpegEncContext *s,
                       int16_t *block, int n,
                       int intra_dc, uint8_t *scan_table)
{
    int i, last_non_zero;
    uint8_t *len_tab;
    const int last_index = s->block_last_index[n];
    int len = 0;

    if (s->mb_intra) {  // Note gcc (3.2.1 at least) will optimize this away
        /* MPEG-4 based DC predictor */
        len += mpeg4_get_dc_length(intra_dc, n);
        if (last_index < 1)
            return len;
        i = 1;
        len_tab = uni_mpeg4_intra_rl_len;
    } else {
        if (last_index < 0)
            return 0;
        i = 0;
        len_tab = uni_mpeg4_inter_rl_len;
    }

    /* AC coefs */
    last_non_zero = i - 1;
    for (; i < last_index; i++) {
        int level = block[scan_table[i]];
        if (level) {
            int run = i - last_non_zero - 1;
            level += 64;
            if ((level & (~127)) == 0) {
                const int index = UNI_MPEG4_ENC_INDEX(0, run, level);
                len += len_tab[index];
            } else {  // ESC3
                len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
            }
            last_non_zero = i;
        }
    }
    /* if (i <= last_index) */ {
        int level = block[scan_table[i]];
        int run   = i - last_non_zero - 1;
        level += 64;
        if ((level & (~127)) == 0) {
            const int index = UNI_MPEG4_ENC_INDEX(1, run, level);
            len += len_tab[index];
        } else {  // ESC3
            len += 7 + 2 + 1 + 6 + 1 + 12 + 1;
        }
    }

    return len;
}

/* Encode (or, with FLAG2_NO_OUTPUT, only size-account) all six blocks of
 * a macroblock.  scan_table == NULL means the default intra scantable. */
static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64],
                                       int intra_dc[6], uint8_t **scan_table,
                                       PutBitContext *dc_pb,
                                       PutBitContext *ac_pb)
{
    int i;

    if (scan_table) {
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
            for (i = 0; i < 6; i++)
                skip_put_bits(&s->pb,
                              mpeg4_get_block_length(s, block[i], i,
                                                     intra_dc[i], scan_table[i]));
        } else {
            /* encode each block */
            for (i = 0; i < 6; i++)
                mpeg4_encode_block(s, block[i], i,
                                   intra_dc[i], scan_table[i], dc_pb, ac_pb);
        }
    } else {
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
            for (i = 0; i < 6; i++)
                skip_put_bits(&s->pb,
                              mpeg4_get_block_length(s, block[i], i, 0,
                                                     s->intra_scantable.permutated));
        } else {
            /* encode each block */
            for (i = 0; i < 6; i++)
                mpeg4_encode_block(s, block[i], i, 0,
                                   s->intra_scantable.permutated, dc_pb, ac_pb);
        }
    }
}

static inline int get_b_cbp(MpegEncContext *s,
int16_t block[6][64],
                            int motion_x, int motion_y, int mb_type)
{
    /* Compute the coded-block-pattern for a B macroblock; with CBP_RD,
     * rate-distortion scores decide whether coding each block pays off. */
    int cbp = 0, i;

    if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
        int score = 0;
        const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

        for (i = 0; i < 6; i++) {
            if (s->coded_score[i] < 0) {
                score += s->coded_score[i];
                cbp   |= 1 << (5 - i);
            }
        }

        if (cbp) {
            int zero_score = -6;
            if ((motion_x | motion_y | s->dquant | mb_type) == 0)
                zero_score -= 4;  // 2 * MV + mb_type + cbp bit

            zero_score *= lambda;
            if (zero_score <= score)
                cbp = 0;
        }

        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0 && ((cbp >> (5 - i)) & 1) == 0) {
                s->block_last_index[i] = -1;
                s->bdsp.clear_block(s->block[i]);
            }
        }
    } else {
        for (i = 0; i < 6; i++) {
            if (s->block_last_index[i] >= 0)
                cbp |= 1 << (5 - i);
        }
    }
    return cbp;
}

// FIXME this is duplicated to h263.c
static const int dquant_code[5] = { 1, 0, 9, 2, 3 };

/* Encode one macroblock (inter B/P or intra) into the bitstream. */
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
                        int motion_x, int motion_y)
{
    int cbpc, cbpy, pred_x, pred_y;
    PutBitContext *const pb2    = s->data_partitioning ? &s->pb2 : &s->pb;
    PutBitContext *const tex_pb = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B ? &s->tex_pb : &s->pb;
    PutBitContext *const dc_pb  = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_I ? &s->pb2 : &s->pb;
    const int interleaved_stats = (s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->data_partitioning ? 1 : 0;

    if (!s->mb_intra) {
        int i, cbp;

        if (s->pict_type == AV_PICTURE_TYPE_B) {
            /* convert from mv_dir to type */
            static const int mb_type_table[8] = { -1, 3, 2, 1, -1, -1, -1, 0 };
            int mb_type = mb_type_table[s->mv_dir];

            if (s->mb_x == 0) {
                for (i = 0; i < 2; i++)
                    s->last_mv[i][0][0] =
                    s->last_mv[i][0][1] =
                    s->last_mv[i][1][0] =
                    s->last_mv[i][1][1] = 0;
            }

            av_assert2(s->dquant >= -2 && s->dquant <= 2);
            av_assert2((s->dquant & 1) == 0);
            av_assert2(mb_type >= 0);

            /* nothing to do if this MB was skipped in the next P-frame */
            if (s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) {  // FIXME avoid DCT & ...
s->skip_count++;
                s->mv[0][0][0] =
                s->mv[0][0][1] =
                s->mv[1][0][0] =
                s->mv[1][0][1] = 0;
                s->mv_dir = MV_DIR_FORWARD;  // doesn't matter
                s->qscale -= s->dquant;
//                s->mb_skipped = 1;

                return;
            }

            cbp = get_b_cbp(s, block, motion_x, motion_y, mb_type);

            if ((cbp | motion_x | motion_y | mb_type) == 0) {
                /* direct MB with MV={0,0} */
                av_assert2(s->dquant == 0);

                put_bits(&s->pb, 1, 1); /* mb not coded modb1=1 */

                if (interleaved_stats) {
                    s->misc_bits++;
                    s->last_bits++;
                }
                s->skip_count++;
                return;
            }

            put_bits(&s->pb, 1, 0);            /* mb coded modb1=0 */
            put_bits(&s->pb, 1, cbp ? 0 : 1);  /* modb2 */  // FIXME merge
            put_bits(&s->pb, mb_type + 1, 1);  // this table is so simple that we don't need it :)
            if (cbp)
                put_bits(&s->pb, 6, cbp);

            if (cbp && mb_type) {
                if (s->dquant)
                    put_bits(&s->pb, 2, (s->dquant >> 2) + 3);
                else
                    put_bits(&s->pb, 1, 0);
            } else
                s->qscale -= s->dquant;

            if (!s->progressive_sequence) {
                if (cbp)
                    put_bits(&s->pb, 1, s->interlaced_dct);
                if (mb_type)  // not direct mode
                    put_bits(&s->pb, 1, s->mv_type == MV_TYPE_FIELD);
            }

            if (interleaved_stats)
                s->misc_bits += get_bits_diff(s);

            if (!mb_type) {
                av_assert2(s->mv_dir & MV_DIRECT);
                ff_h263_encode_motion_vector(s, motion_x, motion_y, 1);
                s->b_count++;
                s->f_count++;
            } else {
                av_assert2(mb_type > 0 && mb_type < 4);
                if (s->mv_type != MV_TYPE_FIELD) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_h263_encode_motion_vector(s,
                                                     s->mv[0][0][0] - s->last_mv[0][0][0],
                                                     s->mv[0][0][1] - s->last_mv[0][0][1],
                                                     s->f_code);
                        s->last_mv[0][0][0] =
                        s->last_mv[0][1][0] = s->mv[0][0][0];
                        s->last_mv[0][0][1] =
                        s->last_mv[0][1][1] = s->mv[0][0][1];
                        s->f_count++;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_h263_encode_motion_vector(s,
                                                     s->mv[1][0][0] - s->last_mv[1][0][0],
                                                     s->mv[1][0][1] - s->last_mv[1][0][1],
                                                     s->b_code);
                        s->last_mv[1][0][0] =
                        s->last_mv[1][1][0] = s->mv[1][0][0];
                        s->last_mv[1][0][1] =
                        s->last_mv[1][1][1] = s->mv[1][0][1];
                        s->b_count++;
                    }
                } else {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        put_bits(&s->pb, 1, s->field_select[0][0]);
                        put_bits(&s->pb, 1,
s->field_select[0][1]);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        put_bits(&s->pb, 1, s->field_select[1][0]);
                        put_bits(&s->pb, 1, s->field_select[1][1]);
                    }
                    /* last_mv vertical components are stored doubled
                     * (field units), hence the /2 on last_mv only. */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        for (i = 0; i < 2; i++) {
                            ff_h263_encode_motion_vector(s,
                                                         s->mv[0][i][0] - s->last_mv[0][i][0],
                                                         s->mv[0][i][1] - s->last_mv[0][i][1] / 2,
                                                         s->f_code);
                            s->last_mv[0][i][0] = s->mv[0][i][0];
                            s->last_mv[0][i][1] = s->mv[0][i][1] * 2;
                        }
                        s->f_count++;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        for (i = 0; i < 2; i++) {
                            ff_h263_encode_motion_vector(s,
                                                         s->mv[1][i][0] - s->last_mv[1][i][0],
                                                         s->mv[1][i][1] - s->last_mv[1][i][1] / 2,
                                                         s->b_code);
                            s->last_mv[1][i][0] = s->mv[1][i][0];
                            s->last_mv[1][i][1] = s->mv[1][i][1] * 2;
                        }
                        s->b_count++;
                    }
                }
            }

            if (interleaved_stats)
                s->mv_bits += get_bits_diff(s);

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, &s->pb);

            if (interleaved_stats)
                s->p_tex_bits += get_bits_diff(s);
        } else {  /* not a B-picture: P (or S) macroblock */
            cbp = get_p_cbp(s, block, motion_x, motion_y);

            if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
                s->mv_type == MV_TYPE_16X16) {
                /* Check if the B-frames can skip it too, as we must skip it
                 * if we skip here why didn't they just compress
                 * the skip-mb bits instead of reusing them ?!
*/
                if (s->max_b_frames > 0) {
                    int i;
                    int x, y, offset;
                    uint8_t *p_pic;

                    x = s->mb_x * 16;
                    y = s->mb_y * 16;

                    offset = x + y * s->linesize;
                    p_pic  = s->new_picture.f->data[0] + offset;

                    s->mb_skipped = 1;
                    for (i = 0; i < s->max_b_frames; i++) {
                        uint8_t *b_pic;
                        int diff;
                        Picture *pic = s->reordered_input_picture[i + 1];

                        if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
                            break;

                        b_pic = pic->f->data[0] + offset;
                        if (!pic->shared)
                            b_pic += INPLACE_OFFSET;

                        /* Partial (edge) macroblocks get a hand-rolled SAD
                         * over the valid area only. */
                        if (x + 16 > s->width || y + 16 > s->height) {
                            int x1, y1;
                            int xe = FFMIN(16, s->width - x);
                            int ye = FFMIN(16, s->height - y);
                            diff = 0;
                            for (y1 = 0; y1 < ye; y1++) {
                                for (x1 = 0; x1 < xe; x1++) {
                                    diff += FFABS(p_pic[x1 + y1 * s->linesize] -
                                                  b_pic[x1 + y1 * s->linesize]);
                                }
                            }
                            diff = diff * 256 / (xe * ye);
                        } else {
                            diff = s->mecc.sad[0](NULL, p_pic, b_pic,
                                                  s->linesize, 16);
                        }
                        if (diff > s->qscale * 70) {  // FIXME check that 70 is optimal
                            s->mb_skipped = 0;
                            break;
                        }
                    }
                } else
                    s->mb_skipped = 1;

                if (s->mb_skipped == 1) {
                    /* skip macroblock */
                    put_bits(&s->pb, 1, 1);

                    if (interleaved_stats) {
                        s->misc_bits++;
                        s->last_bits++;
                    }
                    s->skip_count++;

                    return;
                }
            }

            put_bits(&s->pb, 1, 0);  /* mb coded */
            cbpc  = cbp & 3;
            cbpy  = cbp >> 2;
            cbpy ^= 0xf;
            if (s->mv_type == MV_TYPE_16X16) {
                if (s->dquant)
                    cbpc += 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1],
                         ff_h263_cbpy_tab[cbpy][0]);
                if (s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant + 2]);

                if (!s->progressive_sequence) {
                    if (cbp)
                        put_bits(pb2, 1, s->interlaced_dct);
                    put_bits(pb2, 1, 0);
                }

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                /* motion vectors: 16x16 mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

                ff_h263_encode_motion_vector(s,
                                             motion_x - pred_x,
                                             motion_y - pred_y,
                                             s->f_code);
            } else if (s->mv_type == MV_TYPE_FIELD) {
                if (s->dquant)
                    cbpc += 8;
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc],
                         ff_h263_inter_MCBPC_code[cbpc]);

                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1],
ff_h263_cbpy_tab[cbpy][0]);
                if (s->dquant)
                    put_bits(pb2, 2, dquant_code[s->dquant + 2]);

                av_assert2(!s->progressive_sequence);
                if (cbp)
                    put_bits(pb2, 1, s->interlaced_dct);
                put_bits(pb2, 1, 1);

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                /* motion vectors: 16x8 interlaced mode */
                ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
                pred_y /= 2;

                put_bits(&s->pb, 1, s->field_select[0][0]);
                put_bits(&s->pb, 1, s->field_select[0][1]);

                ff_h263_encode_motion_vector(s,
                                             s->mv[0][0][0] - pred_x,
                                             s->mv[0][0][1] - pred_y,
                                             s->f_code);
                ff_h263_encode_motion_vector(s,
                                             s->mv[0][1][0] - pred_x,
                                             s->mv[0][1][1] - pred_y,
                                             s->f_code);
            } else {
                av_assert2(s->mv_type == MV_TYPE_8X8);
                put_bits(&s->pb,
                         ff_h263_inter_MCBPC_bits[cbpc + 16],
                         ff_h263_inter_MCBPC_code[cbpc + 16]);
                put_bits(pb2, ff_h263_cbpy_tab[cbpy][1],
                         ff_h263_cbpy_tab[cbpy][0]);

                if (!s->progressive_sequence && cbp)
                    put_bits(pb2, 1, s->interlaced_dct);

                if (interleaved_stats)
                    s->misc_bits += get_bits_diff(s);

                for (i = 0; i < 4; i++) {
                    /* motion vectors: 8x8 mode*/
                    ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);

                    ff_h263_encode_motion_vector(s,
                                                 s->current_picture.motion_val[0][s->block_index[i]][0] - pred_x,
                                                 s->current_picture.motion_val[0][s->block_index[i]][1] - pred_y,
                                                 s->f_code);
                }
            }

            if (interleaved_stats)
                s->mv_bits += get_bits_diff(s);

            mpeg4_encode_blocks(s, block, NULL, NULL, NULL, tex_pb);

            if (interleaved_stats)
                s->p_tex_bits += get_bits_diff(s);

            s->f_count++;
        }
    } else {
        /* Intra macroblock path. */
        int cbp;
        int dc_diff[6];  // dc values with the dc prediction subtracted
        int dir[6];      // prediction direction
        int zigzag_last_index[6];
        uint8_t *scan_table[6];
        int i;

        for (i = 0; i < 6; i++)
            dc_diff[i] = ff_mpeg4_pred_dc(s, i, block[i][0], &dir[i], 1);

        if (s->avctx->flags & AV_CODEC_FLAG_AC_PRED) {
            s->ac_pred = decide_ac_pred(s, block, dir, scan_table,
                                        zigzag_last_index);
        } else {
            for (i = 0; i < 6; i++)
                scan_table[i] = s->intra_scantable.permutated;
        }

        /* compute cbp */
        cbp = 0;
        for (i = 0; i < 6; i++)
            if (s->block_last_index[i] >= 1)
                cbp |= 1 <<
(5 - i); cbpc = cbp & 3; if (s->pict_type == AV_PICTURE_TYPE_I) { if (s->dquant) cbpc += 4; put_bits(&s->pb, ff_h263_intra_MCBPC_bits[cbpc], ff_h263_intra_MCBPC_code[cbpc]); } else { if (s->dquant) cbpc += 8; put_bits(&s->pb, 1, 0); /* mb coded */ put_bits(&s->pb, ff_h263_inter_MCBPC_bits[cbpc + 4], ff_h263_inter_MCBPC_code[cbpc + 4]); } put_bits(pb2, 1, s->ac_pred); cbpy = cbp >> 2; put_bits(pb2, ff_h263_cbpy_tab[cbpy][1], ff_h263_cbpy_tab[cbpy][0]); if (s->dquant) put_bits(dc_pb, 2, dquant_code[s->dquant + 2]); if (!s->progressive_sequence) put_bits(dc_pb, 1, s->interlaced_dct); if (interleaved_stats) s->misc_bits += get_bits_diff(s); mpeg4_encode_blocks(s, block, dc_diff, scan_table, dc_pb, tex_pb); if (interleaved_stats) s->i_tex_bits += get_bits_diff(s); s->i_count++; /* restore ac coeffs & last_index stuff * if we messed them up with the prediction */ if (s->ac_pred) restore_ac_coeffs(s, block, dir, scan_table, zigzag_last_index); } } /** * add MPEG-4 stuffing bits (01...1) */ void ff_mpeg4_stuffing(PutBitContext *pbc) { int length; put_bits(pbc, 1, 0); length = (-put_bits_count(pbc)) & 7; if (length) put_bits(pbc, length, (1 << length) - 1); } /* must be called before writing the header */ void ff_set_mpeg4_time(MpegEncContext *s) { if (s->pict_type == AV_PICTURE_TYPE_B) { ff_mpeg4_init_direct_mv(s); } else { s->last_time_base = s->time_base; s->time_base = FFUDIV(s->time, s->avctx->time_base.den); } } static void mpeg4_encode_gop_header(MpegEncContext *s) { int64_t hours, minutes, seconds; int64_t time; put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, GOP_STARTCODE); time = s->current_picture_ptr->f->pts; if (s->reordered_input_picture[1]) time = FFMIN(time, s->reordered_input_picture[1]->f->pts); time = time * s->avctx->time_base.num; s->last_time_base = FFUDIV(time, s->avctx->time_base.den); seconds = FFUDIV(time, s->avctx->time_base.den); minutes = FFUDIV(seconds, 60); seconds = FFUMOD(seconds, 60); hours = FFUDIV(minutes, 60); minutes = FFUMOD(minutes, 
60); hours = FFUMOD(hours , 24); put_bits(&s->pb, 5, hours); put_bits(&s->pb, 6, minutes); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, seconds); put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)); put_bits(&s->pb, 1, 0); // broken link == NO ff_mpeg4_stuffing(&s->pb); } static void mpeg4_encode_visual_object_header(MpegEncContext *s) { int profile_and_level_indication; int vo_ver_id; if (s->avctx->profile != FF_PROFILE_UNKNOWN) { profile_and_level_indication = s->avctx->profile << 4; } else if (s->max_b_frames || s->quarter_sample) { profile_and_level_indication = 0xF0; // adv simple } else { profile_and_level_indication = 0x00; // simple } if (s->avctx->level != FF_LEVEL_UNKNOWN) profile_and_level_indication |= s->avctx->level; else profile_and_level_indication |= 1; // level 1 if (profile_and_level_indication >> 4 == 0xF) vo_ver_id = 5; else vo_ver_id = 1; // FIXME levels put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, VOS_STARTCODE); put_bits(&s->pb, 8, profile_and_level_indication); put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, VISUAL_OBJ_STARTCODE); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 4, vo_ver_id); put_bits(&s->pb, 3, 1); // priority put_bits(&s->pb, 4, 1); // visual obj type== video obj put_bits(&s->pb, 1, 0); // video signal type == no clue // FIXME ff_mpeg4_stuffing(&s->pb); } static void mpeg4_encode_vol_header(MpegEncContext *s, int vo_number, int vol_number) { int vo_ver_id; if (!CONFIG_MPEG4_ENCODER) return; if (s->max_b_frames || s->quarter_sample) { vo_ver_id = 5; s->vo_type = ADV_SIMPLE_VO_TYPE; } else { vo_ver_id = 1; s->vo_type = SIMPLE_VO_TYPE; } put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, 0x100 + vo_number); /* video obj */ put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, 0x120 + vol_number); /* video obj layer */ put_bits(&s->pb, 1, 0); /* random access vol */ put_bits(&s->pb, 8, s->vo_type); /* video obj type indication */ if (s->workaround_bugs & FF_BUG_MS) { put_bits(&s->pb, 1, 0); /* is obj layer id= no */ } else { 
put_bits(&s->pb, 1, 1); /* is obj layer id= yes */ put_bits(&s->pb, 4, vo_ver_id); /* is obj layer ver id */ put_bits(&s->pb, 3, 1); /* is obj layer priority */ } s->aspect_ratio_info = ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio); put_bits(&s->pb, 4, s->aspect_ratio_info); /* aspect ratio info */ if (s->aspect_ratio_info == FF_ASPECT_EXTENDED) { av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den, s->avctx->sample_aspect_ratio.num, s->avctx->sample_aspect_ratio.den, 255); put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num); put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den); } if (s->workaround_bugs & FF_BUG_MS) { put_bits(&s->pb, 1, 0); /* vol control parameters= no @@@ */ } else { put_bits(&s->pb, 1, 1); /* vol control parameters= yes */ put_bits(&s->pb, 2, 1); /* chroma format YUV 420/YV12 */ put_bits(&s->pb, 1, s->low_delay); put_bits(&s->pb, 1, 0); /* vbv parameters= no */ } put_bits(&s->pb, 2, RECT_SHAPE); /* vol shape= rectangle */ put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 16, s->avctx->time_base.den); if (s->time_increment_bits < 1) s->time_increment_bits = 1; put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 1, 0); /* fixed vop rate=no */ put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 13, s->width); /* vol width */ put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 13, s->height); /* vol height */ put_bits(&s->pb, 1, 1); /* marker bit */ put_bits(&s->pb, 1, s->progressive_sequence ? 
0 : 1); put_bits(&s->pb, 1, 1); /* obmc disable */ if (vo_ver_id == 1) put_bits(&s->pb, 1, 0); /* sprite enable */ else put_bits(&s->pb, 2, 0); /* sprite enable */ put_bits(&s->pb, 1, 0); /* not 8 bit == false */ put_bits(&s->pb, 1, s->mpeg_quant); /* quant type = (0 = H.263 style) */ if (s->mpeg_quant) { ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix); ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix); } if (vo_ver_id != 1) put_bits(&s->pb, 1, s->quarter_sample); put_bits(&s->pb, 1, 1); /* complexity estimation disable */ put_bits(&s->pb, 1, s->rtp_mode ? 0 : 1); /* resync marker disable */ put_bits(&s->pb, 1, s->data_partitioning ? 1 : 0); if (s->data_partitioning) put_bits(&s->pb, 1, 0); /* no rvlc */ if (vo_ver_id != 1) { put_bits(&s->pb, 1, 0); /* newpred */ put_bits(&s->pb, 1, 0); /* reduced res vop */ } put_bits(&s->pb, 1, 0); /* scalability */ ff_mpeg4_stuffing(&s->pb); /* user data */ if (!(s->avctx->flags & AV_CODEC_FLAG_BITEXACT)) { put_bits(&s->pb, 16, 0); put_bits(&s->pb, 16, 0x1B2); /* user_data */ avpriv_put_string(&s->pb, LIBAVCODEC_IDENT, 0); } } /* write MPEG-4 VOP header */ int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number) { uint64_t time_incr; int64_t time_div, time_mod; if (s->pict_type == AV_PICTURE_TYPE_I) { if (!(s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) { if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) // HACK, the reference sw is buggy mpeg4_encode_visual_object_header(s); if (s->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || picture_number == 0) // HACK, the reference sw is buggy mpeg4_encode_vol_header(s, 0, 0); } if (!(s->workaround_bugs & FF_BUG_MS)) mpeg4_encode_gop_header(s); } s->partitioned_frame = s->data_partitioning && s->pict_type != AV_PICTURE_TYPE_B; put_bits(&s->pb, 16, 0); /* vop header */ put_bits(&s->pb, 16, VOP_STARTCODE); /* vop header */ put_bits(&s->pb, 2, s->pict_type - 1); /* pict type: I = 0 , P = 1 */ time_div = FFUDIV(s->time, s->avctx->time_base.den); 
time_mod = FFUMOD(s->time, s->avctx->time_base.den); time_incr = time_div - s->last_time_base; // This limits the frame duration to max 1 hour if (time_incr > 3600) { av_log(s->avctx, AV_LOG_ERROR, "time_incr %"PRIu64" too large\n", time_incr); return AVERROR(EINVAL); } while (time_incr--) put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 0); put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, s->time_increment_bits, time_mod); /* time increment */ put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, 1); /* vop coded */ if (s->pict_type == AV_PICTURE_TYPE_P) { put_bits(&s->pb, 1, s->no_rounding); /* rounding type */ } put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */ if (!s->progressive_sequence) { put_bits(&s->pb, 1, s->current_picture_ptr->f->top_field_first); put_bits(&s->pb, 1, s->alternate_scan); } // FIXME sprite stuff put_bits(&s->pb, 5, s->qscale); if (s->pict_type != AV_PICTURE_TYPE_I) put_bits(&s->pb, 3, s->f_code); /* fcode_for */ if (s->pict_type == AV_PICTURE_TYPE_B) put_bits(&s->pb, 3, s->b_code); /* fcode_back */ return 0; } static av_cold void init_uni_dc_tab(void) { int level, uni_code, uni_len; for (level = -256; level < 256; level++) { int size, v, l; /* find number of bits */ size = 0; v = abs(level); while (v) { v >>= 1; size++; } if (level < 0) l = (-level) ^ ((1 << size) - 1); else l = level; /* luminance */ uni_code = ff_mpeg4_DCtab_lum[size][0]; uni_len = ff_mpeg4_DCtab_lum[size][1]; if (size > 0) { uni_code <<= size; uni_code |= l; uni_len += size; if (size > 8) { uni_code <<= 1; uni_code |= 1; uni_len++; } } uni_DCtab_lum_bits[level + 256] = uni_code; uni_DCtab_lum_len[level + 256] = uni_len; /* chrominance */ uni_code = ff_mpeg4_DCtab_chrom[size][0]; uni_len = ff_mpeg4_DCtab_chrom[size][1]; if (size > 0) { uni_code <<= size; uni_code |= l; uni_len += size; if (size > 8) { uni_code <<= 1; uni_code |= 1; uni_len++; } } uni_DCtab_chrom_bits[level + 256] = uni_code; uni_DCtab_chrom_len[level + 256] = uni_len; } } static av_cold void 
init_uni_mpeg4_rl_tab(RLTable *rl, uint32_t *bits_tab, uint8_t *len_tab) { int slevel, run, last; av_assert0(MAX_LEVEL >= 64); av_assert0(MAX_RUN >= 63); for (slevel = -64; slevel < 64; slevel++) { if (slevel == 0) continue; for (run = 0; run < 64; run++) { for (last = 0; last <= 1; last++) { const int index = UNI_MPEG4_ENC_INDEX(last, run, slevel + 64); int level = slevel < 0 ? -slevel : slevel; int sign = slevel < 0 ? 1 : 0; int bits, len, code; int level1, run1; len_tab[index] = 100; /* ESC0 */ code = get_rl_index(rl, last, run, level); bits = rl->table_vlc[code][0]; len = rl->table_vlc[code][1]; bits = bits * 2 + sign; len++; if (code != rl->n && len < len_tab[index]) { bits_tab[index] = bits; len_tab[index] = len; } /* ESC1 */ bits = rl->table_vlc[rl->n][0]; len = rl->table_vlc[rl->n][1]; bits = bits * 2; len++; // esc1 level1 = level - rl->max_level[last][run]; if (level1 > 0) { code = get_rl_index(rl, last, run, level1); bits <<= rl->table_vlc[code][1]; len += rl->table_vlc[code][1]; bits += rl->table_vlc[code][0]; bits = bits * 2 + sign; len++; if (code != rl->n && len < len_tab[index]) { bits_tab[index] = bits; len_tab[index] = len; } } /* ESC2 */ bits = rl->table_vlc[rl->n][0]; len = rl->table_vlc[rl->n][1]; bits = bits * 4 + 2; len += 2; // esc2 run1 = run - rl->max_run[last][level] - 1; if (run1 >= 0) { code = get_rl_index(rl, last, run1, level); bits <<= rl->table_vlc[code][1]; len += rl->table_vlc[code][1]; bits += rl->table_vlc[code][0]; bits = bits * 2 + sign; len++; if (code != rl->n && len < len_tab[index]) { bits_tab[index] = bits; len_tab[index] = len; } } /* ESC3 */ bits = rl->table_vlc[rl->n][0]; len = rl->table_vlc[rl->n][1]; bits = bits * 4 + 3; len += 2; // esc3 bits = bits * 2 + last; len++; bits = bits * 64 + run; len += 6; bits = bits * 2 + 1; len++; // marker bits = bits * 4096 + (slevel & 0xfff); len += 12; bits = bits * 2 + 1; len++; // marker if (len < len_tab[index]) { bits_tab[index] = bits; len_tab[index] = len; } } } } } static 
av_cold int encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int ret; static int done = 0; if (avctx->width >= (1<<13) || avctx->height >= (1<<13)) { av_log(avctx, AV_LOG_ERROR, "dimensions too large for MPEG-4\n"); return AVERROR(EINVAL); } if ((ret = ff_mpv_encode_init(avctx)) < 0) return ret; if (!done) { done = 1; init_uni_dc_tab(); ff_rl_init(&ff_mpeg4_rl_intra, ff_mpeg4_static_rl_table_store[0]); init_uni_mpeg4_rl_tab(&ff_mpeg4_rl_intra, uni_mpeg4_intra_rl_bits, uni_mpeg4_intra_rl_len); init_uni_mpeg4_rl_tab(&ff_h263_rl_inter, uni_mpeg4_inter_rl_bits, uni_mpeg4_inter_rl_len); } s->min_qcoeff = -2048; s->max_qcoeff = 2047; s->intra_ac_vlc_length = uni_mpeg4_intra_rl_len; s->intra_ac_vlc_last_length = uni_mpeg4_intra_rl_len + 128 * 64; s->inter_ac_vlc_length = uni_mpeg4_inter_rl_len; s->inter_ac_vlc_last_length = uni_mpeg4_inter_rl_len + 128 * 64; s->luma_dc_vlc_length = uni_DCtab_lum_len; s->ac_esc_length = 7 + 2 + 1 + 6 + 1 + 12 + 1; s->y_dc_scale_table = ff_mpeg4_y_dc_scale_table; s->c_dc_scale_table = ff_mpeg4_c_dc_scale_table; if (s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) { s->avctx->extradata = av_malloc(1024); init_put_bits(&s->pb, s->avctx->extradata, 1024); if (!(s->workaround_bugs & FF_BUG_MS)) mpeg4_encode_visual_object_header(s); mpeg4_encode_vol_header(s, 0, 0); // ff_mpeg4_stuffing(&s->pb); ? 
flush_put_bits(&s->pb); s->avctx->extradata_size = (put_bits_count(&s->pb) + 7) >> 3; } return 0; } void ff_mpeg4_init_partitions(MpegEncContext *s) { uint8_t *start = put_bits_ptr(&s->pb); uint8_t *end = s->pb.buf_end; int size = end - start; int pb_size = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start; int tex_size = (size - 2 * pb_size) & (~3); set_put_bits_buffer_size(&s->pb, pb_size); init_put_bits(&s->tex_pb, start + pb_size, tex_size); init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size); } void ff_mpeg4_merge_partitions(MpegEncContext *s) { const int pb2_len = put_bits_count(&s->pb2); const int tex_pb_len = put_bits_count(&s->tex_pb); const int bits = put_bits_count(&s->pb); if (s->pict_type == AV_PICTURE_TYPE_I) { put_bits(&s->pb, 19, DC_MARKER); s->misc_bits += 19 + pb2_len + bits - s->last_bits; s->i_tex_bits += tex_pb_len; } else { put_bits(&s->pb, 17, MOTION_MARKER); s->misc_bits += 17 + pb2_len; s->mv_bits += bits - s->last_bits; s->p_tex_bits += tex_pb_len; } flush_put_bits(&s->pb2); flush_put_bits(&s->tex_pb); set_put_bits_buffer_size(&s->pb, s->pb2.buf_end - s->pb.buf); avpriv_copy_bits(&s->pb, s->pb2.buf, pb2_len); avpriv_copy_bits(&s->pb, s->tex_pb.buf, tex_pb_len); s->last_bits = put_bits_count(&s->pb); } void ff_mpeg4_encode_video_packet_header(MpegEncContext *s) { int mb_num_bits = av_log2(s->mb_num - 1) + 1; put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s), 0); put_bits(&s->pb, 1, 1); put_bits(&s->pb, mb_num_bits, s->mb_x + s->mb_y * s->mb_width); put_bits(&s->pb, s->quant_precision, s->qscale); put_bits(&s->pb, 1, 0); /* no HEC */ } #define OFFSET(x) offsetof(MpegEncContext, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { { "data_partitioning", "Use data partitioning.", OFFSET(data_partitioning), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, { "alternate_scan", "Enable alternate scantable.", OFFSET(alternate_scan), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, 
FF_MPV_COMMON_OPTS { NULL }, }; static const AVClass mpeg4enc_class = { .class_name = "MPEG4 encoder", .item_name = av_default_item_name, .option = options, .version = LIBAVUTIL_VERSION_INT, }; AVCodec ff_mpeg4_encoder = { .name = "mpeg4", .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_MPEG4, .priv_data_size = sizeof(MpegEncContext), .init = encode_init, .encode2 = ff_mpv_encode_picture, .close = ff_mpv_encode_end, .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS, .priv_class = &mpeg4enc_class, };
/*
 * ---- dataset concatenation artifact ----
 * The two marker lines below are not C code; they separate two unrelated
 * source files joined in this chunk (an MPEG-4 encoder above, KVM x86
 * code below). Kept as a comment so the text remains syntactically valid:
 *   ./CrossVul/dataset_final_sorted/CWE-20/c/good_190_0
 *   crossvul-cpp_data_bad_5794_2
 */
/* * Kernel-based Virtual Machine driver for Linux * * derived from drivers/kvm/kvm_main.c * * Copyright (C) 2006 Qumranet, Inc. * Copyright (C) 2008 Qumranet, Inc. * Copyright IBM Corporation, 2008 * Copyright 2010 Red Hat, Inc. and/or its affiliates. * * Authors: * Avi Kivity <avi@qumranet.com> * Yaniv Kamay <yaniv@qumranet.com> * Amit Shah <amit.shah@qumranet.com> * Ben-Ami Yassour <benami@il.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include <linux/kvm_host.h> #include "irq.h" #include "mmu.h" #include "i8254.h" #include "tss.h" #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" #include <linux/clocksource.h> #include <linux/interrupt.h> #include <linux/kvm.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/mman.h> #include <linux/highmem.h> #include <linux/iommu.h> #include <linux/intel-iommu.h> #include <linux/cpufreq.h> #include <linux/user-return-notifier.h> #include <linux/srcu.h> #include <linux/slab.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <linux/hash.h> #include <linux/pci.h> #include <linux/timekeeper_internal.h> #include <linux/pvclock_gtod.h> #include <trace/events/kvm.h> #define CREATE_TRACE_POINTS #include "trace.h" #include <asm/debugreg.h> #include <asm/msr.h> #include <asm/desc.h> #include <asm/mtrr.h> #include <asm/mce.h> #include <asm/i387.h> #include <asm/fpu-internal.h> /* Ugh! 
*/ #include <asm/xcr.h> #include <asm/pvclock.h> #include <asm/div64.h> #define MAX_IO_MSRS 256 #define KVM_MAX_MCE_BANKS 32 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) #define emul_to_vcpu(ctxt) \ container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt) /* EFER defaults: * - enable syscall per default because its emulated by KVM * - enable LME and LMA per default on 64 bit KVM */ #ifdef CONFIG_X86_64 static u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); #else static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); #endif #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU static void update_cr8_intercept(struct kvm_vcpu *vcpu); static void process_nmi(struct kvm_vcpu *vcpu); struct kvm_x86_ops *kvm_x86_ops; EXPORT_SYMBOL_GPL(kvm_x86_ops); static bool ignore_msrs = 0; module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR); bool kvm_has_tsc_control; EXPORT_SYMBOL_GPL(kvm_has_tsc_control); u32 kvm_max_guest_tsc_khz; EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz); /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */ static u32 tsc_tolerance_ppm = 250; module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR); #define KVM_NR_SHARED_MSRS 16 struct kvm_shared_msrs_global { int nr; u32 msrs[KVM_NR_SHARED_MSRS]; }; struct kvm_shared_msrs { struct user_return_notifier urn; bool registered; struct kvm_shared_msr_values { u64 host; u64 curr; } values[KVM_NR_SHARED_MSRS]; }; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static struct kvm_shared_msrs __percpu *shared_msrs; struct kvm_stats_debugfs_item debugfs_entries[] = { { "pf_fixed", VCPU_STAT(pf_fixed) }, { "pf_guest", VCPU_STAT(pf_guest) }, { "tlb_flush", VCPU_STAT(tlb_flush) }, { "invlpg", VCPU_STAT(invlpg) }, { "exits", VCPU_STAT(exits) }, { "io_exits", VCPU_STAT(io_exits) }, { "mmio_exits", VCPU_STAT(mmio_exits) }, { "signal_exits", 
VCPU_STAT(signal_exits) }, { "irq_window", VCPU_STAT(irq_window_exits) }, { "nmi_window", VCPU_STAT(nmi_window_exits) }, { "halt_exits", VCPU_STAT(halt_exits) }, { "halt_wakeup", VCPU_STAT(halt_wakeup) }, { "hypercalls", VCPU_STAT(hypercalls) }, { "request_irq", VCPU_STAT(request_irq_exits) }, { "irq_exits", VCPU_STAT(irq_exits) }, { "host_state_reload", VCPU_STAT(host_state_reload) }, { "efer_reload", VCPU_STAT(efer_reload) }, { "fpu_reload", VCPU_STAT(fpu_reload) }, { "insn_emulation", VCPU_STAT(insn_emulation) }, { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, { "irq_injections", VCPU_STAT(irq_injections) }, { "nmi_injections", VCPU_STAT(nmi_injections) }, { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, { "mmu_pte_write", VM_STAT(mmu_pte_write) }, { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, { "mmu_flooded", VM_STAT(mmu_flooded) }, { "mmu_recycled", VM_STAT(mmu_recycled) }, { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, { "largepages", VM_STAT(lpages) }, { NULL } }; u64 __read_mostly host_xcr0; static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt); static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) { int i; for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++) vcpu->arch.apf.gfns[i] = ~0; } static void kvm_on_user_return(struct user_return_notifier *urn) { unsigned slot; struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { wrmsrl(shared_msrs_global.msrs[slot], values->host); values->curr = values->host; } } locals->registered = false; user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) { u64 value; unsigned int cpu = smp_processor_id(); struct 
kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); /* only read, and nobody should modify it at this time, * so don't need lock */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; } void kvm_define_shared_msr(unsigned slot, u32 msr) { if (slot >= shared_msrs_global.nr) shared_msrs_global.nr = slot + 1; shared_msrs_global.msrs[slot] = msr; /* we need ensured the shared_msr_global have been updated */ smp_wmb(); } EXPORT_SYMBOL_GPL(kvm_define_shared_msr); static void kvm_shared_msr_cpu_online(void) { unsigned i; for (i = 0; i < shared_msrs_global.nr; ++i) shared_msr_update(i, shared_msrs_global.msrs[i]); } void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); if (((value ^ smsr->values[slot].curr) & mask) == 0) return; smsr->values[slot].curr = value; wrmsrl(shared_msrs_global.msrs[slot], value); if (!smsr->registered) { smsr->urn.on_user_return = kvm_on_user_return; user_return_notifier_register(&smsr->urn); smsr->registered = true; } } EXPORT_SYMBOL_GPL(kvm_set_shared_msr); static void drop_user_return_notifiers(void *ignore) { unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); if (smsr->registered) kvm_on_user_return(&smsr->urn); } u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) { return vcpu->arch.apic_base; } EXPORT_SYMBOL_GPL(kvm_get_apic_base); void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) { /* TODO: reserve bits check */ kvm_lapic_set_base(vcpu, data); } EXPORT_SYMBOL_GPL(kvm_set_apic_base); asmlinkage void kvm_spurious_fault(void) { /* Fault while not rebooting. We want the trace. 
*/ BUG(); } EXPORT_SYMBOL_GPL(kvm_spurious_fault); #define EXCPT_BENIGN 0 #define EXCPT_CONTRIBUTORY 1 #define EXCPT_PF 2 static int exception_class(int vector) { switch (vector) { case PF_VECTOR: return EXCPT_PF; case DE_VECTOR: case TS_VECTOR: case NP_VECTOR: case SS_VECTOR: case GP_VECTOR: return EXCPT_CONTRIBUTORY; default: break; } return EXCPT_BENIGN; } static void kvm_multiple_exception(struct kvm_vcpu *vcpu, unsigned nr, bool has_error, u32 error_code, bool reinject) { u32 prev_nr; int class1, class2; kvm_make_request(KVM_REQ_EVENT, vcpu); if (!vcpu->arch.exception.pending) { queue: vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = has_error; vcpu->arch.exception.nr = nr; vcpu->arch.exception.error_code = error_code; vcpu->arch.exception.reinject = reinject; return; } /* to check exception */ prev_nr = vcpu->arch.exception.nr; if (prev_nr == DF_VECTOR) { /* triple fault -> shutdown */ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return; } class1 = exception_class(prev_nr); class2 = exception_class(nr); if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) { /* generate double fault per SDM Table 5-5 */ vcpu->arch.exception.pending = true; vcpu->arch.exception.has_error_code = true; vcpu->arch.exception.nr = DF_VECTOR; vcpu->arch.exception.error_code = 0; } else /* replace previous exception with a new one in a hope that instruction re-execution will regenerate lost exception */ goto queue; } void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, false); } EXPORT_SYMBOL_GPL(kvm_queue_exception); void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) { kvm_multiple_exception(vcpu, nr, false, 0, true); } EXPORT_SYMBOL_GPL(kvm_requeue_exception); void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) { if (err) kvm_inject_gp(vcpu, 0); else kvm_x86_ops->skip_emulated_instruction(vcpu); } 
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

/*
 * Inject a #PF into the guest: record the faulting address in the
 * architectural CR2 slot and queue a page-fault exception with the
 * supplied error code. Also bumps the pf_guest statistic.
 */
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = fault->address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

/*
 * Route a fault to the right MMU: faults that did not originate from a
 * nested page walk while running a nested guest go to the nested MMU's
 * injector, everything else to the ordinary MMU's injector.
 */
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

/*
 * Queue an NMI for the vcpu: count it (NMIs can pile up) and request
 * NMI processing on the next entry.
 */
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	atomic_inc(&vcpu->arch.nmi_queued);
	kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

/* Queue an exception that carries an error code (first delivery). */
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

/* Re-queue an exception with error code (reinject after a failed delivery). */
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true. Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * This function will be used to read from the physical memory of the currently
 * running guest. The difference to kvm_read_guest_page is that this function
 * can read from guest physical or from the guest's guest physical memory.
*/ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gfn_t ngfn, void *data, int offset, int len, u32 access) { gfn_t real_gfn; gpa_t ngpa; ngpa = gfn_to_gpa(ngfn); real_gfn = mmu->translate_gpa(vcpu, ngpa, access); if (real_gfn == UNMAPPED_GVA) return -EFAULT; real_gfn = gpa_to_gfn(real_gfn); return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len); } EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu); int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, int len, u32 access) { return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, data, offset, len, access); } /* * Load the pae pdptrs. Return true is they are all valid. */ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) { gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; int i; int ret; u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, offset * sizeof(u64), sizeof(pdpte), PFERR_USER_MASK|PFERR_WRITE_MASK); if (ret < 0) { ret = 0; goto out; } for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { if (is_present_gpte(pdpte[i]) && (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { ret = 0; goto out; } } ret = 1; memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail); __set_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_dirty); out: return ret; } EXPORT_SYMBOL_GPL(load_pdptrs); static bool pdptrs_changed(struct kvm_vcpu *vcpu) { u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; bool changed = true; int offset; gfn_t gfn; int r; if (is_long_mode(vcpu) || !is_pae(vcpu)) return false; if (!test_bit(VCPU_EXREG_PDPTR, (unsigned long *)&vcpu->arch.regs_avail)) return true; gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT; offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1); r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), PFERR_USER_MASK | PFERR_WRITE_MASK); 
	if (r < 0)
		goto out;
	/* Compare the freshly-read PDPTEs against the cached copy. */
	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

/*
 * kvm_set_cr0 - validate and install a new guest CR0 value.
 *
 * Returns 0 on success, 1 if the value must be rejected: reserved high
 * bits (64-bit only), NW set without CD, PG set without PE, or an
 * illegal paging-enable transition (entering long mode with a 64-bit
 * code segment, or PAE paging with unloadable PDPTEs).
 * NOTE(review): callers appear to turn the non-zero return into a guest
 * #GP - the emulator/vmexit paths are outside this chunk; confirm.
 */
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
				    X86_CR0_CD | X86_CR0_NW;

	/* CR0.ET is hardwired to 1 on all modern CPUs. */
	cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	/* Guest is enabling paging. */
	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			/* Long mode requires PAE ... */
			if (!is_pae(vcpu))
				return 1;
			/* ... and must not be entered from a 64-bit CS. */
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		/* PAE paging: the four PDPTEs must be loadable now. */
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
						 kvm_read_cr3(vcpu)))
			return 1;
	}

	/* PCIDE may not remain set while paging is disabled. */
	if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
		return 1;

	kvm_x86_ops->set_cr0(vcpu, cr0);

	/* Toggling CR0.PG invalidates any pending async page faults. */
	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_async_pf_hash_reset(vcpu);
	}

	/* Paging/caching mode changed: rebuild the MMU context. */
	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

/*
 * Emulate LMSW: only the low 4 bits (PE/MP/EM/TS) are written; bit 0 of
 * the old CR0 is kept in the OR, so LMSW can set PE but never clear it.
 */
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

/* Switch XCR0 to the guest's value before entering the guest. */
static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
			!vcpu->guest_xcr0_loaded) {
		/* kvm_set_xcr() also depends on this */
		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
		vcpu->guest_xcr0_loaded = 1;
	}
}

/* Restore the host's XCR0 after leaving the guest, if it differs. */
static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_xcr0_loaded) {
		if (vcpu->arch.xcr0 != host_xcr0)
			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
		vcpu->guest_xcr0_loaded = 0;
	}
}

/*
 * __kvm_set_xcr - validate and record a guest XSETBV write.
 * Returns 0 on success, 1 on an invalid index or value (the caller
 * maps that to #GP, see kvm_set_xcr below).
 */
int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;
	u64 valid_bits;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	/* XCR0.FP (bit 0) is architecturally always set. */
	if (!(xcr0 & XSTATE_FP))
		return 1;
	/* YMM state cannot be enabled without SSE state. */
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;

	/*
	 * Do not allow the guest to set bits that we do not support
	 * saving.  However, xcr0 bit 0 is always set, even if the
	 * emulated CPU does not support XSAVE (see fx_init).
	 */
	valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
	if (xcr0 & ~valid_bits)
		return 1;

	kvm_put_guest_xcr0(vcpu);
	vcpu->arch.xcr0 = xcr0;
	return 0;
}

/*
 * kvm_set_xcr - XSETBV entry point: requires CPL 0; injects #GP and
 * returns 1 on any failure, 0 on success.
 */
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

/*
 * kvm_set_cr4 - validate and install a new guest CR4 value.
 *
 * Returns 0 on success, 1 to reject: reserved bits, feature bits
 * (OSXSAVE/SMEP/FSGSBASE/PCIDE) the guest CPUID does not advertise,
 * clearing PAE in long mode, or PAE-paging changes with unloadable
 * PDPTEs.  NOTE(review): non-zero presumably becomes #GP in the
 * caller - confirm against the emulator paths.
 */
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	/* Bits whose change requires re-reading the PDPTEs. */
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
				   X86_CR4_PAE | X86_CR4_SMEP;
	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
		return 1;

	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
		return 1;

	if (is_long_mode(vcpu)) {
		/* Long mode requires PAE to stay set. */
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
				   kvm_read_cr3(vcpu)))
		return 1;

	if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
		if (!guest_cpuid_has_pcid(vcpu))
			return 1;

		/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
		if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
			return 1;
	}

	if (kvm_x86_ops->set_cr4(vcpu, cr4))
		return 1;

	/* Paging-structure-relevant bits changed: rebuild the MMU. */
	if (((cr4 ^ old_cr4) & pdptr_bits) ||
	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
		kvm_mmu_reset_context(vcpu);

	/* OSXSAVE toggling changes the CPUID leaves the guest sees. */
	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
		kvm_update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

/*
 * kvm_set_cr3 - validate and install a new guest CR3 value.
 * Returns 0 on success, 1 on reserved-bit violations.
 */
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	/* Reloading the same CR3 with unchanged PDPTEs: just resync. */
	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
			if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
				return 1;
		} else
			if (cr3 & CR3_L_MODE_RESERVED_BITS)
				return 1;
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				return 1;
			/* New CR3 must point at loadable PDPTEs. */
			if (is_paging(vcpu) &&
			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
				return 1;
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
	kvm_mmu_new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

/*
 * kvm_set_cr8 - write the task-priority register.  With an in-kernel
 * irqchip the value lives in the emulated local APIC, otherwise it is
 * cached in vcpu->arch.cr8.  Returns 1 on reserved bits, else 0.
 */
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

/* Read CR8 from whichever place kvm_set_cr8 stored it. */
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

/*
 * Push the effective DR7 to hardware: the host-side debug value while
 * userspace hardware breakpoints are active, the guest's otherwise.
 * Also caches whether any breakpoint-enable bit is set.
 */
static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->arch.guest_debug_dr7;
	else
		dr7 = vcpu->arch.dr7;
	kvm_x86_ops->set_dr7(vcpu, dr7);
	vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
}

/*
 * __kvm_set_dr - core debug-register write.
 * Returns 0 on success, 1 when the access should raise #UD
 * (DR4/DR5 with CR4.DE set), -1 when it should raise #GP
 * (non-zero upper 32 bits); see kvm_set_dr for the mapping.
 */
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ...
	3:
		vcpu->arch.db[dr] = val;
		/* Don't clobber the live value while userspace debugs. */
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		/* DR4 aliases DR6 only when CR4.DE is clear. */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		/* DR5 aliases DR7 only when CR4.DE is clear. */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		kvm_update_dr7(vcpu);
		break;
	}

	return 0;
}

/*
 * kvm_set_dr - debug-register write with exception delivery: maps
 * __kvm_set_dr's 1 to #UD and -1 to #GP, returning the raw result.
 */
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	int res;

	res = __kvm_set_dr(vcpu, dr, val);
	if (res > 0)
		kvm_queue_exception(vcpu, UD_VECTOR);
	else if (res < 0)
		kvm_inject_gp(vcpu, 0);

	return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

/*
 * _kvm_get_dr - core debug-register read.  Returns 0 and stores the
 * value in *val, or 1 when DR4/DR5 are accessed with CR4.DE set (#UD).
 */
static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}

/* Debug-register read; queues #UD on an illegal DR4/DR5 access. */
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	if (_kvm_get_dr(vcpu, dr, val)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

/*
 * Emulate RDPMC: reads the counter selected by guest ECX and returns
 * the 64-bit value split across EAX:EDX.
 * NOTE(review): the bool return carries kvm_pmu_read_pmc's error code
 * (0 == success), so "true" means failure here - confirm callers
 * interpret it that way.
 */
bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	u64 data;
	int err;

	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
	if (err)
		return err;
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
	return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
This capabilities test skips MSRs that are * kvm-specific. Those are put in the beginning of the list. */ #define KVM_SAVE_MSRS_BEGIN 10 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL, HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME, MSR_KVM_PV_EOI_EN, MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP, MSR_STAR, #ifdef CONFIG_X86_64 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, #endif MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, MSR_IA32_FEATURE_CONTROL }; static unsigned num_msrs_to_save; static const u32 emulated_msrs[] = { MSR_IA32_TSC_ADJUST, MSR_IA32_TSCDEADLINE, MSR_IA32_MISC_ENABLE, MSR_IA32_MCG_STATUS, MSR_IA32_MCG_CTL, }; bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { if (efer & efer_reserved_bits) return false; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) return false; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) return false; } return true; } EXPORT_SYMBOL_GPL(kvm_valid_efer); static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { u64 old_efer = vcpu->arch.efer; if (!kvm_valid_efer(vcpu, efer)) return 1; if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) return 1; efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; kvm_x86_ops->set_efer(vcpu, efer); /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); return 0; } void kvm_enable_efer_bits(u64 mask) { efer_reserved_bits &= ~mask; } EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); /* * Writes msr value into into the appropriate "register". * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. 
*/ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) { return kvm_x86_ops->set_msr(vcpu, msr); } /* * Adapt set_msr() to msr_io()'s calling convention */ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { struct msr_data msr; msr.data = *data; msr.index = index; msr.host_initiated = true; return kvm_set_msr(vcpu, &msr); } #ifdef CONFIG_X86_64 struct pvclock_gtod_data { seqcount_t seq; struct { /* extract of a clocksource struct */ int vclock_mode; cycle_t cycle_last; cycle_t mask; u32 mult; u32 shift; } clock; /* open coded 'struct timespec' */ u64 monotonic_time_snsec; time_t monotonic_time_sec; }; static struct pvclock_gtod_data pvclock_gtod_data; static void update_pvclock_gtod(struct timekeeper *tk) { struct pvclock_gtod_data *vdata = &pvclock_gtod_data; write_seqcount_begin(&vdata->seq); /* copy pvclock gtod data */ vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode; vdata->clock.cycle_last = tk->clock->cycle_last; vdata->clock.mask = tk->clock->mask; vdata->clock.mult = tk->mult; vdata->clock.shift = tk->shift; vdata->monotonic_time_sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; vdata->monotonic_time_snsec = tk->xtime_nsec + (tk->wall_to_monotonic.tv_nsec << tk->shift); while (vdata->monotonic_time_snsec >= (((u64)NSEC_PER_SEC) << tk->shift)) { vdata->monotonic_time_snsec -= ((u64)NSEC_PER_SEC) << tk->shift; vdata->monotonic_time_sec++; } write_seqcount_end(&vdata->seq); } #endif static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. 
guest system time equals host * system time for us, thus we must fill in host boot time here. */ getboottime(&boot); if (kvm->arch.kvmclock_offset) { struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); boot = timespec_sub(boot, ts); } wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); } static uint32_t div_frac(uint32_t dividend, uint32_t divisor) { uint32_t quotient, remainder; /* Don't try to replace with do_div(), this one calculates * "(dividend << 32) / divisor" */ __asm__ ( "divl %4" : "=a" (quotient), "=d" (remainder) : "0" (0), "1" (dividend), "r" (divisor) ); return quotient; } static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz, s8 *pshift, u32 *pmultiplier) { uint64_t scaled64; int32_t shift = 0; uint64_t tps64; uint32_t tps32; tps64 = base_khz * 1000LL; scaled64 = scaled_khz * 1000LL; while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) { tps64 >>= 1; shift--; } tps32 = (uint32_t)tps64; while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) { if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000) scaled64 >>= 1; else tps32 <<= 1; shift++; } *pshift = shift; *pmultiplier = div_frac(scaled64, tps32); pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n", __func__, base_khz, scaled_khz, shift, *pmultiplier); } static inline u64 get_kernel_ns(void) { struct timespec ts; WARN_ON(preemptible()); ktime_get_ts(&ts); monotonic_to_bootbased(&ts); return timespec_to_ns(&ts); } #ifdef CONFIG_X86_64 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0); #endif static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz); unsigned long max_tsc_khz; static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); } static u32 adjust_tsc_khz(u32 khz, s32 ppm) { u64 v = (u64)khz * (1000000 + 
ppm); do_div(v, 1000000); return v; } static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz) { u32 thresh_lo, thresh_hi; int use_scaling = 0; /* tsc_khz can be zero if TSC calibration fails */ if (this_tsc_khz == 0) return; /* Compute a scale to convert nanoseconds in TSC cycles */ kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000, &vcpu->arch.virtual_tsc_shift, &vcpu->arch.virtual_tsc_mult); vcpu->arch.virtual_tsc_khz = this_tsc_khz; /* * Compute the variation in TSC rate which is acceptable * within the range of tolerance and decide if the * rate being applied is within that bounds of the hardware * rate. If so, no scaling or compensation need be done. */ thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm); if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) { pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi); use_scaling = 1; } kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling); } static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) { u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); tsc += vcpu->arch.this_tsc_write; return tsc; } void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 bool vcpus_matched; bool do_request = false; struct kvm_arch *ka = &vcpu->kvm->arch; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&vcpu->kvm->online_vcpus)); if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) if (!ka->use_master_clock) do_request = 1; if (!vcpus_matched && ka->use_master_clock) do_request = 1; if (do_request) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, atomic_read(&vcpu->kvm->online_vcpus), ka->use_master_clock, gtod->clock.vclock_mode); #endif } static void 
update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) { u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu); vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; } void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 usdiff; bool matched; u64 data = msr->data; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; if (vcpu->arch.virtual_tsc_khz) { int faulted = 0; /* n.b - signed multiplication and division required */ usdiff = data - kvm->arch.last_tsc_write; #ifdef CONFIG_X86_64 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; #else /* do_div() only does unsigned */ asm("1: idivl %[divisor]\n" "2: xor %%edx, %%edx\n" " movl $0, %[faulted]\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl $1, %[faulted]\n" " jmp 3b\n" ".previous\n" _ASM_EXTABLE(1b, 4b) : "=A"(usdiff), [faulted] "=r" (faulted) : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); #endif do_div(elapsed, 1000); usdiff -= elapsed; if (usdiff < 0) usdiff = -usdiff; /* idivl overflow => difference is larger than USEC_PER_SEC */ if (faulted) usdiff = USEC_PER_SEC; } else usdiff = USEC_PER_SEC; /* disable TSC match window below */ /* * Special case: TSC write with a small delta (1 second) of virtual * cycle time against real time is interpreted as an attempt to * synchronize the CPU. * * For a reliable TSC, we can match TSC offsets, and for an unstable * TSC, we add elapsed time in this computation. We could let the * compensation code attempt to catch up if we fall behind, but * it's better to try to match offsets from the beginning. 
*/ if (usdiff < USEC_PER_SEC && vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { if (!check_tsc_unstable()) { offset = kvm->arch.cur_tsc_offset; pr_debug("kvm: matched tsc offset for %llu\n", data); } else { u64 delta = nsec_to_cycles(vcpu, elapsed); data += delta; offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); pr_debug("kvm: adjusted tsc offset by %llu\n", delta); } matched = true; } else { /* * We split periods of matched TSC writes into generations. * For each generation, we track the original measured * nanosecond time, offset, and write, so if TSCs are in * sync, we can match exact offset, and if not, we can match * exact software computation in compute_guest_tsc() * * These values are tracked in kvm->arch.cur_xxx variables. */ kvm->arch.cur_tsc_generation++; kvm->arch.cur_tsc_nsec = ns; kvm->arch.cur_tsc_write = data; kvm->arch.cur_tsc_offset = offset; matched = false; pr_debug("kvm: new tsc generation %u, clock %llu\n", kvm->arch.cur_tsc_generation, data); } /* * We also track th most recent recorded KHZ, write and time to * allow the matching interval to be extended at each write. 
*/ kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = data; kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; /* Reset of TSC must disable overshoot protection below */ vcpu->arch.hv_clock.tsc_timestamp = 0; vcpu->arch.last_guest_tsc = data; /* Keep track of which generation this VCPU has synchronized to */ vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) update_ia32_tsc_adjust_msr(vcpu, offset); kvm_x86_ops->write_tsc_offset(vcpu, offset); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); spin_lock(&kvm->arch.pvclock_gtod_sync_lock); if (matched) kvm->arch.nr_vcpus_matched_tsc++; else kvm->arch.nr_vcpus_matched_tsc = 0; kvm_track_tsc_matching(vcpu); spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); } EXPORT_SYMBOL_GPL(kvm_write_tsc); #ifdef CONFIG_X86_64 static cycle_t read_tsc(void) { cycle_t ret; u64 last; /* * Empirically, a fence (of type that depends on the CPU) * before rdtsc is enough to ensure that rdtsc is ordered * with respect to loads. The various CPU manuals are unclear * as to whether rdtsc can be reordered with later loads, * but no one has ever seen it happen. */ rdtsc_barrier(); ret = (cycle_t)vget_cycles(); last = pvclock_gtod_data.clock.cycle_last; if (likely(ret >= last)) return ret; /* * GCC likes to generate cmov here, but this branch is extremely * predictable (it's just a funciton of time and the likely is * very likely) and there's a data dependence, so force GCC * to generate a branch instead. I don't barrier() because * we don't actually need a barrier, and if this function * ever gets inlined it will generate worse code. 
*/ asm volatile (""); return last; } static inline u64 vgettsc(cycle_t *cycle_now) { long v; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; *cycle_now = read_tsc(); v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask; return v * gtod->clock.mult; } static int do_monotonic(struct timespec *ts, cycle_t *cycle_now) { unsigned long seq; u64 ns; int mode; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; ts->tv_nsec = 0; do { seq = read_seqcount_begin(&gtod->seq); mode = gtod->clock.vclock_mode; ts->tv_sec = gtod->monotonic_time_sec; ns = gtod->monotonic_time_snsec; ns += vgettsc(cycle_now); ns >>= gtod->clock.shift; } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); timespec_add_ns(ts, ns); return mode; } /* returns true if host is using tsc clocksource */ static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now) { struct timespec ts; /* checked again under seqlock below */ if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC) return false; if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC) return false; monotonic_to_bootbased(&ts); *kernel_ns = timespec_to_ns(&ts); return true; } #endif /* * * Assuming a stable TSC across physical CPUS, and a stable TSC * across virtual CPUs, the following condition is possible. * Each numbered line represents an event visible to both * CPUs at the next numbered event. * * "timespecX" represents host monotonic time. "tscX" represents * RDTSC value. * * VCPU0 on CPU0 | VCPU1 on CPU1 * * 1. read timespec0,tsc0 * 2. | timespec1 = timespec0 + N * | tsc1 = tsc0 + M * 3. transition to guest | transition to guest * 4. ret0 = timespec0 + (rdtsc - tsc0) | * 5. | ret1 = timespec1 + (rdtsc - tsc1) * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M)) * * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity: * * - ret0 < ret1 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M)) * ... * - 0 < N - M => M < N * * That is, when timespec0 != timespec1, M < N. 
Unfortunately that is not * always the case (the difference between two distinct xtime instances * might be smaller then the difference between corresponding TSC reads, * when updating guest vcpus pvclock areas). * * To avoid that problem, do not allow visibility of distinct * system_timestamp/tsc_timestamp values simultaneously: use a master * copy of host monotonic time values. Update that master copy * in lockstep. * * Rely on synchronization of host TSCs and guest TSCs for monotonicity. * */ static void pvclock_update_vm_gtod_copy(struct kvm *kvm) { #ifdef CONFIG_X86_64 struct kvm_arch *ka = &kvm->arch; int vclock_mode; bool host_tsc_clocksource, vcpus_matched; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&kvm->online_vcpus)); /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. */ host_tsc_clocksource = kvm_get_time_and_clockread( &ka->master_kernel_ns, &ka->master_cycle_now); ka->use_master_clock = host_tsc_clocksource & vcpus_matched; if (ka->use_master_clock) atomic_set(&kvm_guest_has_master_clock, 1); vclock_mode = pvclock_gtod_data.clock.vclock_mode; trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, vcpus_matched); #endif } static void kvm_gen_update_masterclock(struct kvm *kvm) { #ifdef CONFIG_X86_64 int i; struct kvm_vcpu *vcpu; struct kvm_arch *ka = &kvm->arch; spin_lock(&ka->pvclock_gtod_sync_lock); kvm_make_mclock_inprogress_request(kvm); /* no guest entries from this point */ pvclock_update_vm_gtod_copy(kvm); kvm_for_each_vcpu(i, vcpu, kvm) set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); /* guest entries allowed */ kvm_for_each_vcpu(i, vcpu, kvm) clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests); spin_unlock(&ka->pvclock_gtod_sync_lock); #endif } static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags, this_tsc_khz; struct kvm_vcpu_arch *vcpu = &v->arch; struct kvm_arch *ka = &v->kvm->arch; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp, host_tsc; struct 
pvclock_vcpu_time_info guest_hv_clock; u8 pvclock_flags; bool use_master_clock; kernel_ns = 0; host_tsc = 0; /* * If the host uses TSC clock, then passthrough TSC as stable * to the guest. */ spin_lock(&ka->pvclock_gtod_sync_lock); use_master_clock = ka->use_master_clock; if (use_master_clock) { host_tsc = ka->master_cycle_now; kernel_ns = ka->master_kernel_ns; } spin_unlock(&ka->pvclock_gtod_sync_lock); /* Keep irq disabled to prevent changes to the clock */ local_irq_save(flags); this_tsc_khz = __get_cpu_var(cpu_tsc_khz); if (unlikely(this_tsc_khz == 0)) { local_irq_restore(flags); kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); return 1; } if (!use_master_clock) { host_tsc = native_read_tsc(); kernel_ns = get_kernel_ns(); } tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc); /* * We may have to catch up the TSC to match elapsed wall clock * time for two reasons, even if kvmclock is used. * 1) CPU could have been running below the maximum TSC rate * 2) Broken TSC compensation resets the base at each VCPU * entry to avoid unknown leaps of TSC even when running * again on the same CPU. This may cause apparent elapsed * time to disappear, and the guest to stand still or run * very slowly. */ if (vcpu->tsc_catchup) { u64 tsc = compute_guest_tsc(v, kernel_ns); if (tsc > tsc_timestamp) { adjust_tsc_offset_guest(v, tsc - tsc_timestamp); tsc_timestamp = tsc; } } local_irq_restore(flags); if (!vcpu->pv_time_enabled) return 0; /* * Time as measured by the TSC may go backwards when resetting the base * tsc_timestamp. The reason for this is that the TSC resolution is * higher than the resolution of the other clock scales. Thus, many * possible measurments of the TSC correspond to one measurement of any * other clock, and so a spread of values is possible. 
This is not a * problem for the computation of the nanosecond clock; with TSC rates * around 1GHZ, there can only be a few cycles which correspond to one * nanosecond value, and any path through this code will inevitably * take longer than that. However, with the kernel_ns value itself, * the precision may be much lower, down to HZ granularity. If the * first sampling of TSC against kernel_ns ends in the low part of the * range, and the second in the high end of the range, we can get: * * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new * * As the sampling errors potentially range in the thousands of cycles, * it is possible such a time value has already been observed by the * guest. To protect against this, we must compute the system time as * observed by the guest and ensure the new system time is greater. */ max_kernel_ns = 0; if (vcpu->hv_clock.tsc_timestamp) { max_kernel_ns = vcpu->last_guest_tsc - vcpu->hv_clock.tsc_timestamp; max_kernel_ns = pvclock_scale_delta(max_kernel_ns, vcpu->hv_clock.tsc_to_system_mul, vcpu->hv_clock.tsc_shift); max_kernel_ns += vcpu->last_kernel_ns; } if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) { kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz, &vcpu->hv_clock.tsc_shift, &vcpu->hv_clock.tsc_to_system_mul); vcpu->hw_tsc_khz = this_tsc_khz; } /* with a master <monotonic time, tsc value> tuple, * pvclock clock reads always increase at the (scaled) rate * of guest TSC - no need to deal with sampling errors. */ if (!use_master_clock) { if (max_kernel_ns > kernel_ns) kernel_ns = max_kernel_ns; } /* With all the info we got, fill in the values */ vcpu->hv_clock.tsc_timestamp = tsc_timestamp; vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; vcpu->last_kernel_ns = kernel_ns; vcpu->last_guest_tsc = tsc_timestamp; /* * The interface expects us to write an even number signaling that the * update is finished. Since the guest won't see the intermediate * state, we just increase by 2 at the end. 
*/ vcpu->hv_clock.version += 2; if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, &guest_hv_clock, sizeof(guest_hv_clock)))) return 0; /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); if (vcpu->pvclock_set_guest_stopped_request) { pvclock_flags |= PVCLOCK_GUEST_STOPPED; vcpu->pvclock_set_guest_stopped_request = false; } /* If the host uses TSC clocksource, then it is stable */ if (use_master_clock) pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; vcpu->hv_clock.flags = pvclock_flags; kvm_write_guest_cached(v->kvm, &vcpu->pv_time, &vcpu->hv_clock, sizeof(vcpu->hv_clock)); return 0; } /* * kvmclock updates which are isolated to a given vcpu, such as * vcpu->cpu migration, should not allow system_timestamp from * the rest of the vcpus to remain static. Otherwise ntp frequency * correction applies to one vcpu's system_timestamp but not * the others. * * So in those cases, request a kvmclock update for all vcpus. * The worst case for a remote vcpu to update its kvmclock * is then bounded by maximum nohz sleep latency. */ static void kvm_gen_kvmclock_update(struct kvm_vcpu *v) { int i; struct kvm *kvm = v->kvm; struct kvm_vcpu *vcpu; kvm_for_each_vcpu(i, vcpu, kvm) { set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); kvm_vcpu_kick(vcpu); } } static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ... 
0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; case 0x2f8: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ return valid_mtrr_type(data & 0xff); } static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!mtrr_valid(vcpu, msr, data)) return 1; if (msr == MSR_MTRRdefType) { vcpu->arch.mtrr_state.def_type = data; vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10; } else if (msr == MSR_MTRRfix64K_00000) p[0] = data; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) p[1 + msr - MSR_MTRRfix16K_80000] = data; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) p[3 + msr - MSR_MTRRfix4K_C0000] = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 
*)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		/*
		 * NOTE(review): idx is not bounds-checked here; it is
		 * trusted to be < KVM_NR_VAR_MTRR solely because
		 * msr_mtrr_valid() restricts which MSR numbers reach
		 * this path.  Verify no accepted MSR outside
		 * 0x200..0x200 + 2*KVM_NR_VAR_MTRR - 1 can get here.
		 */
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

/*
 * set_msr_mce - handle writes to the machine-check MSRs.
 * Returns 0 on success, 1 for an unknown/unsupported MSR and -1 for a
 * value that must fault (MCG_CTL / MCi_CTL accept only all-zeroes or
 * all-ones).  bank_num comes from the low byte of MCG_CAP.
 */
static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		/* Per-bank MSRs: 4 registers (CTL/STATUS/ADDR/MISC) each. */
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL
			 * some Linux kernels though clear bit 10 in bank 4 to
			 * workaround a BIOS/GART TBL issue on AMD K8s, ignore
			 * this to avoid an uncatched #GP in the guest
			 */
			if ((offset & 0x3) == 0 &&
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}

/*
 * xen_hvm_config - copy one page of the userspace-supplied Xen
 * hypercall blob into guest memory at the page number/address encoded
 * in the MSR write (low bits: page number, high bits: guest address).
 */
static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ?
kvm->arch.xen_hvm_config.blob_size_64 : kvm->arch.xen_hvm_config.blob_size_32; u32 page_num = data & ~PAGE_MASK; u64 page_addr = data & PAGE_MASK; u8 *page; int r; r = -E2BIG; if (page_num >= blob_size) goto out; r = -ENOMEM; page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE); if (IS_ERR(page)) { r = PTR_ERR(page); goto out; } if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE)) goto out_free; r = 0; out_free: kfree(page); out: return r; } static bool kvm_hv_hypercall_enabled(struct kvm *kvm) { return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE; } static bool kvm_hv_msr_partition_wide(u32 msr) { bool r = false; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: case HV_X64_MSR_HYPERCALL: r = true; break; } return r; } static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: kvm->arch.hv_guest_os_id = data; /* setting guest os id to zero disables hypercall page */ if (!kvm->arch.hv_guest_os_id) kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; break; case HV_X64_MSR_HYPERCALL: { u64 gfn; unsigned long addr; u8 instructions[4]; /* if guest os id is not set hypercall should remain disabled */ if (!kvm->arch.hv_guest_os_id) break; if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { kvm->arch.hv_hypercall = data; break; } gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT; addr = gfn_to_hva(kvm, gfn); if (kvm_is_error_hva(addr)) return 1; kvm_x86_ops->patch_hypercall(vcpu, instructions); ((unsigned char *)instructions)[3] = 0xc3; /* ret */ if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; break; } default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { 
vcpu->arch.hv_vapic = data; break; } addr = gfn_to_hva(vcpu->kvm, data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); if (kvm_is_error_hva(addr)) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; } static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) { gpa_t gpa = data & ~0x3f; /* Bits 2:5 are reserved, Should be zero */ if (data & 0x3c) return 1; vcpu->arch.apf.msr_val = data; if (!(data & KVM_ASYNC_PF_ENABLED)) { kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); return 0; } if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); kvm_async_pf_wakeup_all(vcpu); return 0; } static void kvmclock_reset(struct kvm_vcpu *vcpu) { vcpu->arch.pv_time_enabled = false; } static void accumulate_steal_time(struct kvm_vcpu *vcpu) { u64 delta; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; delta = current->sched_info.run_delay - vcpu->arch.st.last_steal; vcpu->arch.st.last_steal = current->sched_info.run_delay; vcpu->arch.st.accum_steal = delta; } static void record_steal_time(struct kvm_vcpu *vcpu) { if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)))) return; vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal; vcpu->arch.st.steal.version += 2; vcpu->arch.st.accum_steal = 0; kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime, &vcpu->arch.st.steal, sizeof(struct kvm_steal_time)); } int 
kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { bool pr = false; u32 msr = msr_info->index; u64 data = msr_info->data; switch (msr) { case MSR_AMD64_NB_CFG: case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: case MSR_AMD64_BU_CFG2: break; case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: data &= ~(u64)0x40; /* ignore flush filter disable */ data &= ~(u64)0x100; /* ignore ignne emulation enable */ data &= ~(u64)0x8; /* ignore TLB cache disable */ if (data != 0) { vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", data); return 1; } break; case MSR_FAM10H_MMIO_CONF_BASE: if (data != 0) { vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " "0x%llx\n", data); return 1; } break; case MSR_IA32_DEBUGCTLMSR: if (!data) { /* We support the non-activated case already */ break; } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) { /* Values other than LBR and BTF are vendor-specific, thus reserved and should throw a #GP */ return 1; } vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", __func__, data); break; case 0x200 ... 0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: kvm_set_apic_base(vcpu, data); break; case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: kvm_set_lapic_tscdeadline_msr(vcpu, data); break; case MSR_IA32_TSC_ADJUST: if (guest_cpuid_has_tsc_adjust(vcpu)) { if (!msr_info->host_initiated) { u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); } vcpu->arch.ia32_tsc_adjust_msr = data; } break; case MSR_IA32_MISC_ENABLE: vcpu->arch.ia32_misc_enable_msr = data; break; case MSR_KVM_WALL_CLOCK_NEW: case MSR_KVM_WALL_CLOCK: vcpu->kvm->arch.wall_clock = data; kvm_write_wall_clock(vcpu->kvm, data); break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); /* we verify if the enable bit is set... */ if (!(data & 1)) break; gpa_offset = data & ~(PAGE_MASK | 1); if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_time, data & ~1ULL, sizeof(struct pvclock_vcpu_time_info))) vcpu->arch.pv_time_enabled = false; else vcpu->arch.pv_time_enabled = true; break; } case MSR_KVM_ASYNC_PF_EN: if (kvm_pv_enable_async_pf(vcpu, data)) return 1; break; case MSR_KVM_STEAL_TIME: if (unlikely(!sched_info_on())) return 1; if (data & KVM_STEAL_RESERVED_MASK) return 1; if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, data & KVM_STEAL_VALID_BITS, sizeof(struct kvm_steal_time))) return 1; vcpu->arch.st.msr_val = data; if (!(data & KVM_MSR_ENABLED)) break; vcpu->arch.st.last_steal = current->sched_info.run_delay; preempt_disable(); accumulate_steal_time(vcpu); preempt_enable(); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); break; case MSR_KVM_PV_EOI_EN: if (kvm_lapic_enable_pv_eoi(vcpu, data)) return 1; break; case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... 
MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return set_msr_mce(vcpu, msr, data); /* Performance counters are not protected by a CPUID bit, * so we should check all of them in the generic path for the sake of * cross vendor migration. * Writing a zero into the event select MSRs disables them, * which we perfectly emulate ;-). Any other value should be at least * reported, some guests depend on them. */ case MSR_K7_EVNTSEL0: case MSR_K7_EVNTSEL1: case MSR_K7_EVNTSEL2: case MSR_K7_EVNTSEL3: if (data != 0) vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; /* at least RHEL 4 unconditionally writes to the perfctr registers, * so we ignore writes to make it happy. */ case MSR_K7_PERFCTR0: case MSR_K7_PERFCTR1: case MSR_K7_PERFCTR2: case MSR_K7_PERFCTR3: vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: pr = true; case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); if (pr || data != 0) vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " "0x%x data 0x%llx\n", msr, data); break; case MSR_K7_CLK_CTL: /* * Ignore all writes to this no longer documented MSR. * Writes are only relevant for old K7 processors, * all pre-dating SVM, but a recommended workaround from * AMD for these chips. It is possible to specify the * affected processor models on the command line, hence * the need to ignore the workaround. */ break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = set_msr_hyperv_pw(vcpu, msr, data); mutex_unlock(&vcpu->kvm->lock); return r; } else return set_msr_hyperv(vcpu, msr, data); break; case MSR_IA32_BBL_CR_CTL3: /* Drop writes to this legacy MSR -- see rdmsr * counterpart for further detail. 
*/ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.length = data; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; vcpu->arch.osvw.status = data; break; default: if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) return xen_hvm_config(vcpu, data); if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_set_msr(vcpu, msr_info); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); return 1; } else { vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data); break; } } return 0; } EXPORT_SYMBOL_GPL(kvm_set_msr_common); /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. * Assumes vcpu_load() was already called. */ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) { return kvm_x86_ops->get_msr(vcpu, msr_index, pdata); } static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges; if (!msr_mtrr_valid(msr)) return 1; if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.def_type + (vcpu->arch.mtrr_state.enabled << 10); else if (msr == MSR_MTRRfix64K_00000) *pdata = p[0]; else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000) *pdata = p[1 + msr - MSR_MTRRfix16K_80000]; else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000) *pdata = p[3 + msr - MSR_MTRRfix4K_C0000]; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int idx, is_mtrr_mask; u64 *pt; idx = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * idx; if (!is_mtrr_mask) pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo; else pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo; *pdata = *pt; } return 0; } static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap 
& 0xff; switch (msr) { case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: data = 0; break; case MSR_IA32_MCG_CAP: data = vcpu->arch.mcg_cap; break; case MSR_IA32_MCG_CTL: if (!(mcg_cap & MCG_CTL_P)) return 1; data = vcpu->arch.mcg_ctl; break; case MSR_IA32_MCG_STATUS: data = vcpu->arch.mcg_status; break; default: if (msr >= MSR_IA32_MC0_CTL && msr < MSR_IA32_MC0_CTL + 4 * bank_num) { u32 offset = msr - MSR_IA32_MC0_CTL; data = vcpu->arch.mce_banks[offset]; break; } return 1; } *pdata = data; return 0; } static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; struct kvm *kvm = vcpu->kvm; switch (msr) { case HV_X64_MSR_GUEST_OS_ID: data = kvm->arch.hv_guest_os_id; break; case HV_X64_MSR_HYPERCALL: data = kvm->arch.hv_hypercall; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data = 0; switch (msr) { case HV_X64_MSR_VP_INDEX: { int r; struct kvm_vcpu *v; kvm_for_each_vcpu(r, v, vcpu->kvm) if (v == vcpu) data = r; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); case HV_X64_MSR_APIC_ASSIST_PAGE: data = vcpu->arch.hv_vapic; break; default: vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); return 1; } *pdata = data; return 0; } int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { u64 data; switch (msr) { case MSR_IA32_PLATFORM_ID: case MSR_IA32_EBL_CR_POWERON: case MSR_IA32_DEBUGCTLMSR: case MSR_IA32_LASTBRANCHFROMIP: case MSR_IA32_LASTBRANCHTOIP: case MSR_IA32_LASTINTFROMIP: case MSR_IA32_LASTINTTOIP: case MSR_K8_SYSCFG: case MSR_K7_HWCR: case MSR_VM_HSAVE_PA: case MSR_K7_EVNTSEL0: case MSR_K7_PERFCTR0: case MSR_K8_INT_PENDING_MSG: case MSR_AMD64_NB_CFG: case MSR_FAM10H_MMIO_CONF_BASE: case 
MSR_AMD64_BU_CFG2: data = 0; break; case MSR_P6_PERFCTR0: case MSR_P6_PERFCTR1: case MSR_P6_EVNTSEL0: case MSR_P6_EVNTSEL1: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); data = 0; break; case MSR_IA32_UCODE_REV: data = 0x100000000ULL; break; case MSR_MTRRcap: data = 0x500 | KVM_NR_VAR_MTRR; break; case 0x200 ... 0x2ff: return get_msr_mtrr(vcpu, msr, pdata); case 0xcd: /* fsb frequency */ data = 3; break; /* * MSR_EBC_FREQUENCY_ID * Conservative value valid for even the basic CPU models. * Models 0,1: 000 in bits 23:21 indicating a bus speed of * 100MHz, model 2 000 in bits 18:16 indicating 100MHz, * and 266MHz for model 3, or 4. Set Core Clock * Frequency to System Bus Frequency Ratio to 1 (bits * 31:24) even though these are only valid for CPU * models > 2, however guests may end up dividing or * multiplying by zero otherwise. */ case MSR_EBC_FREQUENCY_ID: data = 1 << 24; break; case MSR_IA32_APICBASE: data = kvm_get_apic_base(vcpu); break; case APIC_BASE_MSR ... 
APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_read(vcpu, msr, pdata); break; case MSR_IA32_TSCDEADLINE: data = kvm_get_lapic_tscdeadline_msr(vcpu); break; case MSR_IA32_TSC_ADJUST: data = (u64)vcpu->arch.ia32_tsc_adjust_msr; break; case MSR_IA32_MISC_ENABLE: data = vcpu->arch.ia32_misc_enable_msr; break; case MSR_IA32_PERF_STATUS: /* TSC increment by tick */ data = 1000ULL; /* CPU multiplier */ data |= (((uint64_t)4ULL) << 40); break; case MSR_EFER: data = vcpu->arch.efer; break; case MSR_KVM_WALL_CLOCK: case MSR_KVM_WALL_CLOCK_NEW: data = vcpu->kvm->arch.wall_clock; break; case MSR_KVM_SYSTEM_TIME: case MSR_KVM_SYSTEM_TIME_NEW: data = vcpu->arch.time; break; case MSR_KVM_ASYNC_PF_EN: data = vcpu->arch.apf.msr_val; break; case MSR_KVM_STEAL_TIME: data = vcpu->arch.st.msr_val; break; case MSR_KVM_PV_EOI_EN: data = vcpu->arch.pv_eoi.msr_val; break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: case MSR_IA32_MCG_CTL: case MSR_IA32_MCG_STATUS: case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1: return get_msr_mce(vcpu, msr, pdata); case MSR_K7_CLK_CTL: /* * Provide expected ramp-up count for K7. All other * are set to zero, indicating minimum divisors for * every field. * * This prevents guest kernels on AMD host with CPU * type 6, model 8 and higher from exploding due to * the rdmsr failing. */ data = 0x20000000; break; case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15: if (kvm_hv_msr_partition_wide(msr)) { int r; mutex_lock(&vcpu->kvm->lock); r = get_msr_hyperv_pw(vcpu, msr, pdata); mutex_unlock(&vcpu->kvm->lock); return r; } else return get_msr_hyperv(vcpu, msr, pdata); break; case MSR_IA32_BBL_CR_CTL3: /* This legacy MSR exists but isn't fully documented in current * silicon. It is however accessed by winxp in very narrow * scenarios where it sets bit #19, itself documented as * a "reserved" bit. 
Best effort attempt to source coherent * read data here should the balance of the register be * interpreted by the guest: * * L2 cache control register 3: 64GB range, 256KB size, * enabled, latency 0x1, configured */ data = 0xbe702111; break; case MSR_AMD64_OSVW_ID_LENGTH: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.length; break; case MSR_AMD64_OSVW_STATUS: if (!guest_cpuid_has_osvw(vcpu)) return 1; data = vcpu->arch.osvw.status; break; default: if (kvm_pmu_msr(vcpu, msr)) return kvm_pmu_get_msr(vcpu, msr, pdata); if (!ignore_msrs) { vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); return 1; } else { vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr); data = 0; } break; } *pdata = data; return 0; } EXPORT_SYMBOL_GPL(kvm_get_msr_common); /* * Read or write a bunch of msrs. All parameters are kernel addresses. * * @return number of msrs set successfully. */ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, struct kvm_msr_entry *entries, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { int i, idx; idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } /* * Read or write a bunch of msrs. Parameters are user addresses. * * @return number of msrs set successfully. 
*/ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data), int writeback) { struct kvm_msrs msrs; struct kvm_msr_entry *entries; int r, n; unsigned size; r = -EFAULT; if (copy_from_user(&msrs, user_msrs, sizeof msrs)) goto out; r = -E2BIG; if (msrs.nmsrs >= MAX_IO_MSRS) goto out; size = sizeof(struct kvm_msr_entry) * msrs.nmsrs; entries = memdup_user(user_msrs->entries, size); if (IS_ERR(entries)) { r = PTR_ERR(entries); goto out; } r = n = __msr_io(vcpu, &msrs, entries, do_msr); if (r < 0) goto out_free; r = -EFAULT; if (writeback && copy_to_user(user_msrs->entries, entries, size)) goto out_free; r = n; out_free: kfree(entries); out: return r; } int kvm_dev_ioctl_check_extension(long ext) { int r; switch (ext) { case KVM_CAP_IRQCHIP: case KVM_CAP_HLT: case KVM_CAP_MMU_SHADOW_CACHE_CONTROL: case KVM_CAP_SET_TSS_ADDR: case KVM_CAP_EXT_CPUID: case KVM_CAP_EXT_EMUL_CPUID: case KVM_CAP_CLOCKSOURCE: case KVM_CAP_PIT: case KVM_CAP_NOP_IO_DELAY: case KVM_CAP_MP_STATE: case KVM_CAP_SYNC_MMU: case KVM_CAP_USER_NMI: case KVM_CAP_REINJECT_CONTROL: case KVM_CAP_IRQ_INJECT_STATUS: case KVM_CAP_IRQFD: case KVM_CAP_IOEVENTFD: case KVM_CAP_PIT2: case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: case KVM_CAP_HYPERV_SPIN: case KVM_CAP_PCI_SEGMENT: case KVM_CAP_DEBUGREGS: case KVM_CAP_X86_ROBUST_SINGLESTEP: case KVM_CAP_XSAVE: case KVM_CAP_ASYNC_PF: case KVM_CAP_GET_TSC_KHZ: case KVM_CAP_KVMCLOCK_CTRL: case KVM_CAP_READONLY_MEM: #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_PCI_2_3: #endif r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_VAPIC: r = !kvm_x86_ops->cpu_has_accelerated_tpr(); break; case KVM_CAP_NR_VCPUS: r = KVM_SOFT_MAX_VCPUS; break; case KVM_CAP_MAX_VCPUS: r = 
KVM_MAX_VCPUS; break; case KVM_CAP_NR_MEMSLOTS: r = KVM_USER_MEM_SLOTS; break; case KVM_CAP_PV_MMU: /* obsolete */ r = 0; break; #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_IOMMU: r = iommu_present(&pci_bus_type); break; #endif case KVM_CAP_MCE: r = KVM_MAX_MCE_BANKS; break; case KVM_CAP_XCRS: r = cpu_has_xsave; break; case KVM_CAP_TSC_CONTROL: r = kvm_has_tsc_control; break; case KVM_CAP_TSC_DEADLINE_TIMER: r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER); break; default: r = 0; break; } return r; } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_GET_MSR_INDEX_LIST: { struct kvm_msr_list __user *user_msr_list = argp; struct kvm_msr_list msr_list; unsigned n; r = -EFAULT; if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list)) goto out; n = msr_list.nmsrs; msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs); if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list)) goto out; r = -E2BIG; if (n < msr_list.nmsrs) goto out; r = -EFAULT; if (copy_to_user(user_msr_list->indices, &msrs_to_save, num_msrs_to_save * sizeof(u32))) goto out; if (copy_to_user(user_msr_list->indices + num_msrs_to_save, &emulated_msrs, ARRAY_SIZE(emulated_msrs) * sizeof(u32))) goto out; r = 0; break; } case KVM_GET_SUPPORTED_CPUID: case KVM_GET_EMULATED_CPUID: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, ioctl); if (r) goto out; r = -EFAULT; if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid)) goto out; r = 0; break; } case KVM_X86_GET_MCE_CAP_SUPPORTED: { u64 mce_cap; mce_cap = KVM_MCE_CAP_SUPPORTED; r = -EFAULT; if (copy_to_user(argp, &mce_cap, sizeof mce_cap)) goto out; r = 0; break; } default: r = -EINVAL; } out: return r; } static void wbinvd_ipi(void *garbage) { wbinvd(); } static bool 
need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return kvm_arch_has_noncoherent_dma(vcpu->kvm); } void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Address WBINVD may be executed by guest */ if (need_emulate_wbinvd(vcpu)) { if (kvm_x86_ops->has_wbinvd_exit()) cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); else if (vcpu->cpu != -1 && vcpu->cpu != cpu) smp_call_function_single(vcpu->cpu, wbinvd_ipi, NULL, 1); } kvm_x86_ops->vcpu_load(vcpu, cpu); /* Apply any externally detected TSC adjustments (due to suspend) */ if (unlikely(vcpu->arch.tsc_offset_adjustment)) { adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); vcpu->arch.tsc_offset_adjustment = 0; set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests); } if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) { s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : native_read_tsc() - vcpu->arch.last_host_tsc; if (tsc_delta < 0) mark_tsc_unstable("KVM discovered backwards TSC"); if (check_tsc_unstable()) { u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu, vcpu->arch.last_guest_tsc); kvm_x86_ops->write_tsc_offset(vcpu, offset); vcpu->arch.tsc_catchup = 1; } /* * On a host with synchronized TSC, there is no need to update * kvmclock on vcpu->cpu migration */ if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); if (vcpu->cpu != cpu) kvm_migrate_timers(vcpu); vcpu->cpu = cpu; } accumulate_steal_time(vcpu); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_x86_ops->vcpu_put(vcpu); kvm_put_guest_fpu(vcpu); vcpu->arch.last_host_tsc = native_read_tsc(); } static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_x86_ops->sync_pir_to_irr(vcpu); memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; } static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_apic_post_state_restore(vcpu, s); update_cr8_intercept(vcpu); return 0; } 
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) { if (irq->irq >= KVM_NR_INTERRUPTS) return -EINVAL; if (irqchip_in_kernel(vcpu->kvm)) return -ENXIO; kvm_queue_interrupt(vcpu, irq->irq, false); kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) { kvm_inject_nmi(vcpu); return 0; } static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, struct kvm_tpr_access_ctl *tac) { if (tac->flags) return -EINVAL; vcpu->arch.tpr_access_reporting = !!tac->enabled; return 0; } static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, u64 mcg_cap) { int r; unsigned bank_num = mcg_cap & 0xff, bank; r = -EINVAL; if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) goto out; if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000)) goto out; r = 0; vcpu->arch.mcg_cap = mcg_cap; /* Init IA32_MCG_CTL to all 1s */ if (mcg_cap & MCG_CTL_P) vcpu->arch.mcg_ctl = ~(u64)0; /* Init IA32_MCi_CTL to all 1s */ for (bank = 0; bank < bank_num; bank++) vcpu->arch.mce_banks[bank*4] = ~(u64)0; out: return r; } static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce) { u64 mcg_cap = vcpu->arch.mcg_cap; unsigned bank_num = mcg_cap & 0xff; u64 *banks = vcpu->arch.mce_banks; if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) return -EINVAL; /* * if IA32_MCG_CTL is not all 1s, the uncorrected error * reporting is disabled */ if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && vcpu->arch.mcg_ctl != ~(u64)0) return 0; banks += 4 * mce->bank; /* * if IA32_MCi_CTL is not all 1s, the uncorrected error * reporting is disabled for the bank */ if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) return 0; if (mce->status & MCI_STATUS_UC) { if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); return 0; } if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = 
mce->addr; banks[3] = mce->misc; vcpu->arch.mcg_status = mce->mcg_status; banks[1] = mce->status; kvm_queue_exception(vcpu, MC_VECTOR); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) mce->status |= MCI_STATUS_OVER; banks[2] = mce->addr; banks[3] = mce->misc; banks[1] = mce->status; } else banks[1] |= MCI_STATUS_OVER; return 0; } static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = 0; /* never valid when reporting to user space */ events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); } static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW)) return -EINVAL; process_nmi(vcpu); vcpu->arch.exception.pending = events->exception.injected; vcpu->arch.exception.nr = events->exception.nr; vcpu->arch.exception.has_error_code = events->exception.has_error_code; vcpu->arch.exception.error_code = 
events->exception.error_code; vcpu->arch.interrupt.pending = events->interrupt.injected; vcpu->arch.interrupt.nr = events->interrupt.nr; vcpu->arch.interrupt.soft = events->interrupt.soft; if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) kvm_x86_ops->set_interrupt_shadow(vcpu, events->interrupt.shadow); vcpu->arch.nmi_injected = events->nmi.injected; if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) vcpu->arch.nmi_pending = events->nmi.pending; kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && kvm_vcpu_has_lapic(vcpu)) vcpu->arch.apic->sipi_vector = events->sipi_vector; kvm_make_request(KVM_REQ_EVENT, vcpu); return 0; } static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); dbgregs->dr6 = vcpu->arch.dr6; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); } static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, struct kvm_debugregs *dbgregs) { if (dbgregs->flags) return -EINVAL; memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = dbgregs->dr6; vcpu->arch.dr7 = dbgregs->dr7; return 0; } static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { if (cpu_has_xsave) { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->xsave, vcpu->arch.guest_xstate_size); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &= vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE; } else { memcpy(guest_xsave->region, &vcpu->arch.guest_fpu.state->fxsave, sizeof(struct i387_fxsave_struct)); *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = XSTATE_FPSSE; } } static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; if (cpu_has_xsave) { /* * Here we allow setting 
states that are not present in * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility * with old userspace. */ if (xstate_bv & ~KVM_SUPPORTED_XCR0) return -EINVAL; if (xstate_bv & ~host_xcr0) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->xsave, guest_xsave->region, vcpu->arch.guest_xstate_size); } else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct i387_fxsave_struct)); } return 0; } static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { if (!cpu_has_xsave) { guest_xcrs->nr_xcrs = 0; return; } guest_xcrs->nr_xcrs = 1; guest_xcrs->flags = 0; guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; } static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { int i, r = 0; if (!cpu_has_xsave) return -EINVAL; if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) return -EINVAL; for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, guest_xcrs->xcrs[i].value); break; } if (r) r = -EINVAL; return r; } /* * kvm_set_guest_paused() indicates to the guest kernel that it has been * stopped by the hypervisor. This function will be called from the host only. * EINVAL is returned when the host attempts to set the flag for a guest that * does not support pv clocks. 
*/ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) { if (!vcpu->arch.pv_time_enabled) return -EINVAL; vcpu->arch.pvclock_set_guest_stopped_request = true; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); return 0; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; int r; union { struct kvm_lapic_state *lapic; struct kvm_xsave *xsave; struct kvm_xcrs *xcrs; void *buffer; } u; u.buffer = NULL; switch (ioctl) { case KVM_GET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL); r = -ENOMEM; if (!u.lapic) goto out; r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state))) goto out; r = 0; break; } case KVM_SET_LAPIC: { r = -EINVAL; if (!vcpu->arch.apic) goto out; u.lapic = memdup_user(argp, sizeof(*u.lapic)); if (IS_ERR(u.lapic)) return PTR_ERR(u.lapic); r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); break; } case KVM_INTERRUPT: { struct kvm_interrupt irq; r = -EFAULT; if (copy_from_user(&irq, argp, sizeof irq)) goto out; r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; } case KVM_NMI: { r = kvm_vcpu_ioctl_nmi(vcpu); break; } case KVM_SET_CPUID: { struct kvm_cpuid __user *cpuid_arg = argp; struct kvm_cpuid cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); break; } case KVM_SET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, cpuid_arg->entries); break; } case KVM_GET_CPUID2: { struct kvm_cpuid2 __user *cpuid_arg = argp; struct kvm_cpuid2 cpuid; r = -EFAULT; if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid)) goto out; r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, 
cpuid_arg->entries);	/* tail of KVM_GET_CPUID2: fetch entries, copy back */
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		/* Batch MSR read on behalf of userspace. */
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		/* Batch MSR write on behalf of userspace. */
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	};	/* stray ';' after the brace is a harmless null statement */
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		/*
		 * NOTE(review): va.vapic_addr comes straight from userspace
		 * and is handed to kvm_lapic_set_vapic_addr() with no
		 * alignment or range check here, and any failure in the
		 * callee cannot be reported (r is forced to 0 above).
		 * Confirm the callee validates the address -- cf.
		 * CVE-2013-6368, where an unchecked vapic_addr was
		 * exploitable; the upstream fix makes the callee return an
		 * error that this case must propagate.
		 */
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	case KVM_X86_SETUP_MCE: {
		u64 mcg_cap;

		r = -EFAULT;
		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
			goto out;
		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
		break;
	}
	case KVM_X86_SET_MCE: {
		struct kvm_x86_mce mce;

		r = -EFAULT;
		if (copy_from_user(&mce, argp, sizeof mce))
			goto out;
		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
		break;
	}
	case KVM_GET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);

		r = -EFAULT;
		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_VCPU_EVENTS: {
		struct kvm_vcpu_events events;

		r = -EFAULT;
		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
			break;

		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
		break;
	}
	case KVM_GET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);

		r = -EFAULT;
		if (copy_to_user(argp, &dbgregs,
				 sizeof(struct kvm_debugregs)))
			break;
		r = 0;
		break;
	}
	case KVM_SET_DEBUGREGS: {
		struct kvm_debugregs dbgregs;

		r = -EFAULT;
		if (copy_from_user(&dbgregs, argp,
				   sizeof(struct kvm_debugregs)))
			break;

		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
		break;
	}
	case KVM_GET_XSAVE: {
		u.xsave =
kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL); r = -ENOMEM; if (!u.xsave) break; kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); r = -EFAULT; if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave))) break; r = 0; break; } case KVM_SET_XSAVE: { u.xsave = memdup_user(argp, sizeof(*u.xsave)); if (IS_ERR(u.xsave)) return PTR_ERR(u.xsave); r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); break; } case KVM_GET_XCRS: { u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL); r = -ENOMEM; if (!u.xcrs) break; kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); r = -EFAULT; if (copy_to_user(argp, u.xcrs, sizeof(struct kvm_xcrs))) break; r = 0; break; } case KVM_SET_XCRS: { u.xcrs = memdup_user(argp, sizeof(*u.xcrs)); if (IS_ERR(u.xcrs)) return PTR_ERR(u.xcrs); r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); break; } case KVM_SET_TSC_KHZ: { u32 user_tsc_khz; r = -EINVAL; user_tsc_khz = (u32)arg; if (user_tsc_khz >= kvm_max_guest_tsc_khz) goto out; if (user_tsc_khz == 0) user_tsc_khz = tsc_khz; kvm_set_tsc_khz(vcpu, user_tsc_khz); r = 0; goto out; } case KVM_GET_TSC_KHZ: { r = vcpu->arch.virtual_tsc_khz; goto out; } case KVM_KVMCLOCK_CTRL: { r = kvm_set_guest_paused(vcpu); goto out; } default: r = -EINVAL; } out: kfree(u.buffer); return r; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr) { int ret; if (addr > (unsigned int)(-3 * PAGE_SIZE)) return -EINVAL; ret = kvm_x86_ops->set_tss_addr(kvm, addr); return ret; } static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm, u64 ident_addr) { kvm->arch.ept_identity_map_addr = ident_addr; return 0; } static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, u32 kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; mutex_lock(&kvm->slots_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; mutex_unlock(&kvm->slots_lock); return 0; } 
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm) { return kvm->arch.n_max_mmu_pages; } static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[0], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_PIC_SLAVE: memcpy(&chip->chip.pic, &pic_irqchip(kvm)->pics[1], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_IOAPIC: r = kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[0], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_PIC_SLAVE: spin_lock(&pic_irqchip(kvm)->lock); memcpy(&pic_irqchip(kvm)->pics[1], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic_irqchip(kvm)->lock); break; case KVM_IRQCHIP_IOAPIC: r = kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } kvm_pic_update_irq(pic_irqchip(kvm)); return r; } static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0; mutex_lock(&kvm->arch.vpit->pit_state.lock); memcpy(ps->channels, 
&kvm->arch.vpit->pit_state.channels, sizeof(ps->channels)); ps->flags = kvm->arch.vpit->pit_state.flags; mutex_unlock(&kvm->arch.vpit->pit_state.lock); memset(&ps->reserved, 0, sizeof(ps->reserved)); return r; } static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) { int r = 0, start = 0; u32 prev_legacy, cur_legacy; mutex_lock(&kvm->arch.vpit->pit_state.lock); prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; if (!prev_legacy && cur_legacy) start = 1; memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels, sizeof(kvm->arch.vpit->pit_state.channels)); kvm->arch.vpit->pit_state.flags = ps->flags; kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start); mutex_unlock(&kvm->arch.vpit->pit_state.lock); return r; } static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { if (!kvm->arch.vpit) return -ENXIO; mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm->arch.vpit->pit_state.reinject = control->pit_reinject; mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; } /** * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot * @kvm: kvm instance * @log: slot id and address to which we copy the log * * We need to keep it in mind that VCPU threads can write to the bitmap * concurrently. So, to avoid losing data, we keep the following order for * each bit: * * 1. Take a snapshot of the bit and clear it if needed. * 2. Write protect the corresponding page. * 3. Flush TLB's if needed. * 4. Copy the snapshot to the userspace. * * Between 2 and 3, the guest may write to the page using the remaining TLB * entry. This is not a problem because the page will be reported dirty at * step 4 using the snapshot taken before and step 3 ensures that successive * writes will be logged for the next call. 
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	int r;
	struct kvm_memory_slot *memslot;
	unsigned long n, i;
	unsigned long *dirty_bitmap;
	unsigned long *dirty_bitmap_buffer;
	bool is_dirty = false;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);

	dirty_bitmap = memslot->dirty_bitmap;
	r = -ENOENT;
	if (!dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);

	/*
	 * The snapshot buffer is laid out immediately after the live
	 * bitmap (same size, n bytes); clear it before taking the snapshot.
	 */
	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
	memset(dirty_bitmap_buffer, 0, n);

	spin_lock(&kvm->mmu_lock);

	for (i = 0; i < n / sizeof(long); i++) {
		unsigned long mask;
		gfn_t offset;

		if (!dirty_bitmap[i])
			continue;

		is_dirty = true;

		/*
		 * Atomically snapshot-and-clear one word of the live bitmap,
		 * then write-protect the pages it covers (steps 1-2 of the
		 * ordering described in the comment above this function).
		 */
		mask = xchg(&dirty_bitmap[i], 0);
		dirty_bitmap_buffer[i] = mask;

		offset = i * BITS_PER_LONG;
		kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
	}
	if (is_dirty)
		kvm_flush_remote_tlbs(kvm);	/* step 3: drop stale TLB entries */

	spin_unlock(&kvm->mmu_lock);

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

/*
 * Assert/deassert an interrupt line from userspace.  Requires the
 * in-kernel irqchip; reports the resulting status back in irq_event.
 */
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

/* Dispatcher for VM-scoped (as opposed to vcpu-scoped) x86 ioctls. */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;
	/*
	 * This union makes it completely explicit to gcc-3.x
	 * that these two variables' stack usage should be
	 * combined, not added together.
*/ union { struct kvm_pit_state ps; struct kvm_pit_state2 ps2; struct kvm_pit_config pit_config; } u; switch (ioctl) { case KVM_SET_TSS_ADDR: r = kvm_vm_ioctl_set_tss_addr(kvm, arg); break; case KVM_SET_IDENTITY_MAP_ADDR: { u64 ident_addr; r = -EFAULT; if (copy_from_user(&ident_addr, argp, sizeof ident_addr)) goto out; r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr); break; } case KVM_SET_NR_MMU_PAGES: r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg); break; case KVM_GET_NR_MMU_PAGES: r = kvm_vm_ioctl_get_nr_mmu_pages(kvm); break; case KVM_CREATE_IRQCHIP: { struct kvm_pic *vpic; mutex_lock(&kvm->lock); r = -EEXIST; if (kvm->arch.vpic) goto create_irqchip_unlock; r = -EINVAL; if (atomic_read(&kvm->online_vcpus)) goto create_irqchip_unlock; r = -ENOMEM; vpic = kvm_create_pic(kvm); if (vpic) { r = kvm_ioapic_init(kvm); if (r) { mutex_lock(&kvm->slots_lock); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr); mutex_unlock(&kvm->slots_lock); kfree(vpic); goto create_irqchip_unlock; } } else goto create_irqchip_unlock; smp_wmb(); kvm->arch.vpic = vpic; smp_wmb(); r = kvm_setup_default_irq_routing(kvm); if (r) { mutex_lock(&kvm->slots_lock); mutex_lock(&kvm->irq_lock); kvm_ioapic_destroy(kvm); kvm_destroy_pic(kvm); mutex_unlock(&kvm->irq_lock); mutex_unlock(&kvm->slots_lock); } create_irqchip_unlock: mutex_unlock(&kvm->lock); break; } case KVM_CREATE_PIT: u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY; goto create_pit; case KVM_CREATE_PIT2: r = -EFAULT; if (copy_from_user(&u.pit_config, argp, sizeof(struct kvm_pit_config))) goto out; create_pit: mutex_lock(&kvm->slots_lock); r = -EEXIST; if (kvm->arch.vpit) goto create_pit_unlock; r = -ENOMEM; kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); if (kvm->arch.vpit) r = 0; create_pit_unlock: mutex_unlock(&kvm->slots_lock); break; case KVM_GET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 
2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto get_irqchip_out; r = kvm_vm_ioctl_get_irqchip(kvm, chip); if (r) goto get_irqchip_out; r = -EFAULT; if (copy_to_user(argp, chip, sizeof *chip)) goto get_irqchip_out; r = 0; get_irqchip_out: kfree(chip); break; } case KVM_SET_IRQCHIP: { /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ struct kvm_irqchip *chip; chip = memdup_user(argp, sizeof(*chip)); if (IS_ERR(chip)) { r = PTR_ERR(chip); goto out; } r = -ENXIO; if (!irqchip_in_kernel(kvm)) goto set_irqchip_out; r = kvm_vm_ioctl_set_irqchip(kvm, chip); if (r) goto set_irqchip_out; r = 0; set_irqchip_out: kfree(chip); break; } case KVM_GET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit(kvm, &u.ps); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state))) goto out; r = 0; break; } case KVM_SET_PIT: { r = -EFAULT; if (copy_from_user(&u.ps, argp, sizeof u.ps)) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit(kvm, &u.ps); break; } case KVM_GET_PIT2: { r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2); if (r) goto out; r = -EFAULT; if (copy_to_user(argp, &u.ps2, sizeof(u.ps2))) goto out; r = 0; break; } case KVM_SET_PIT2: { r = -EFAULT; if (copy_from_user(&u.ps2, argp, sizeof(u.ps2))) goto out; r = -ENXIO; if (!kvm->arch.vpit) goto out; r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2); break; } case KVM_REINJECT_CONTROL: { struct kvm_reinject_control control; r = -EFAULT; if (copy_from_user(&control, argp, sizeof(control))) goto out; r = kvm_vm_ioctl_reinject(kvm, &control); break; } case KVM_XEN_HVM_CONFIG: { r = -EFAULT; if (copy_from_user(&kvm->arch.xen_hvm_config, argp, sizeof(struct kvm_xen_hvm_config))) goto out; r = -EINVAL; if 
(kvm->arch.xen_hvm_config.flags) goto out; r = 0; break; } case KVM_SET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; s64 delta; r = -EFAULT; if (copy_from_user(&user_ns, argp, sizeof(user_ns))) goto out; r = -EINVAL; if (user_ns.flags) goto out; r = 0; local_irq_disable(); now_ns = get_kernel_ns(); delta = user_ns.clock - now_ns; local_irq_enable(); kvm->arch.kvmclock_offset = delta; kvm_gen_update_masterclock(kvm); break; } case KVM_GET_CLOCK: { struct kvm_clock_data user_ns; u64 now_ns; local_irq_disable(); now_ns = get_kernel_ns(); user_ns.clock = kvm->arch.kvmclock_offset + now_ns; local_irq_enable(); user_ns.flags = 0; memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) goto out; r = 0; break; } default: ; } out: return r; } static void kvm_init_msr_list(void) { u32 dummy[2]; unsigned i, j; /* skip the first msrs in the list. KVM-specific */ for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) { if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0) continue; if (j < i) msrs_to_save[j] = msrs_to_save[i]; j++; } num_msrs_to_save = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) { int handled = 0; int n; do { n = min(len, 8); if (!(vcpu->arch.apic && !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v)) && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v)) break; trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v); handled += n; addr += n; len -= n; v += n; } while (len); return handled; } static void kvm_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg) { 
kvm_x86_ops->set_segment(vcpu, var, seg);
}

/* Read a segment register via the vendor (VMX/SVM) backend. */
void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

/*
 * Translate an L2 (nested) guest physical address to an L1 gpa by
 * walking the nested MMU.  Must only be called while nesting is active
 * (BUG otherwise).  Page-walk faults recorded in the local exception
 * struct are discarded here; only the translation result is returned.
 */
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
	gpa_t t_gpa;
	struct x86_exception exception;

	BUG_ON(!mmu_is_nested(vcpu));

	/* NPT walks are always user-walks */
	access |= PFERR_USER_MASK;
	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);

	return t_gpa;
}

/*
 * gva -> gpa for a data read at the current privilege level; CPL 3
 * makes the page walk a user-mode access.
 */
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
			      struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}

/* gva -> gpa for an instruction fetch at the current privilege level. */
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
	access |= PFERR_FETCH_MASK;
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}

/* gva -> gpa for a data write at the current privilege level. */
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
			       struct x86_exception *exception)
{
	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ?
PFERR_USER_MASK : 0; access |= PFERR_WRITE_MASK; return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); } /* uses this to access any guest's mapped memory without checking CPL */ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, struct x86_exception *exception) { return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); } static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, struct kvm_vcpu *vcpu, u32 access, struct x86_exception *exception) { void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= toread; data += toread; addr += toread; } out: return r; } /* used for instruction fetching */ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access | PFERR_FETCH_MASK, exception); } int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? 
PFERR_USER_MASK : 0; return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); } EXPORT_SYMBOL_GPL(kvm_read_guest_virt); static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); } int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); void *data = val; int r = X86EMUL_CONTINUE; while (bytes) { gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, PFERR_WRITE_MASK, exception); unsigned offset = addr & (PAGE_SIZE-1); unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); int ret; if (gpa == UNMAPPED_GVA) return X86EMUL_PROPAGATE_FAULT; ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite); if (ret < 0) { r = X86EMUL_IO_NEEDED; goto out; } bytes -= towrite; data += towrite; addr += towrite; } out: return r; } EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system); static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? 
PFERR_WRITE_MASK : 0); if (vcpu_match_mmio_gva(vcpu, gva) && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) { *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | (gva & (PAGE_SIZE - 1)); trace_vcpu_match_mmio(gva, *gpa, write, false); return 1; } *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); if (*gpa == UNMAPPED_GVA) return -1; /* For APIC access vmexit */ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) return 1; if (vcpu_match_mmio_gpa(vcpu, *gpa)) { trace_vcpu_match_mmio(gva, *gpa, write, true); return 1; } return 0; } int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, const void *val, int bytes) { int ret; ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes); if (ret < 0) return 0; kvm_mmu_pte_write(vcpu, gpa, val, bytes); return 1; } struct read_write_emulator_ops { int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val, int bytes); int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val); int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes); bool write; }; static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, vcpu->mmio_fragments[0].gpa, *(u64 *)val); vcpu->mmio_read_completed = 0; return 1; } return 0; } static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return !kvm_read_guest(vcpu->kvm, gpa, val, bytes); } static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { return emulator_write_phys(vcpu, gpa, val, bytes); } static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) { trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val); return vcpu_mmio_write(vcpu, gpa, bytes, val); } static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, 
bytes, gpa, 0); return X86EMUL_IO_NEEDED; } static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); return X86EMUL_CONTINUE; } static const struct read_write_emulator_ops read_emultor = { .read_write_prepare = read_prepare, .read_write_emulate = read_emulate, .read_write_mmio = vcpu_mmio_read, .read_write_exit_mmio = read_exit_mmio, }; static const struct read_write_emulator_ops write_emultor = { .read_write_emulate = write_emulate, .read_write_mmio = write_mmio, .read_write_exit_mmio = write_exit_mmio, .write = true, }; static int emulator_read_write_onepage(unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, struct kvm_vcpu *vcpu, const struct read_write_emulator_ops *ops) { gpa_t gpa; int handled, ret; bool write = ops->write; struct kvm_mmio_fragment *frag; ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); if (ret < 0) return X86EMUL_PROPAGATE_FAULT; /* For APIC access vmexit */ if (ret) goto mmio; if (ops->read_write_emulate(vcpu, gpa, val, bytes)) return X86EMUL_CONTINUE; mmio: /* * Is this MMIO handled locally? */ handled = ops->read_write_mmio(vcpu, gpa, bytes, val); if (handled == bytes) return X86EMUL_CONTINUE; gpa += handled; bytes -= handled; val += handled; WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; frag->gpa = gpa; frag->data = val; frag->len = bytes; return X86EMUL_CONTINUE; } int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception, const struct read_write_emulator_ops *ops) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); gpa_t gpa; int rc; if (ops->read_write_prepare && ops->read_write_prepare(vcpu, val, bytes)) return X86EMUL_CONTINUE; vcpu->mmio_nr_fragments = 0; /* Crossing a page boundary? 
*/ if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { int now; now = -addr & ~PAGE_MASK; rc = emulator_read_write_onepage(addr, val, now, exception, vcpu, ops); if (rc != X86EMUL_CONTINUE) return rc; addr += now; val += now; bytes -= now; } rc = emulator_read_write_onepage(addr, val, bytes, exception, vcpu, ops); if (rc != X86EMUL_CONTINUE) return rc; if (!vcpu->mmio_nr_fragments) return rc; gpa = vcpu->mmio_fragments[0].gpa; vcpu->mmio_needed = 1; vcpu->mmio_cur_fragment = 0; vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; vcpu->run->exit_reason = KVM_EXIT_MMIO; vcpu->run->mmio.phys_addr = gpa; return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); } static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, void *val, unsigned int bytes, struct x86_exception *exception) { return emulator_read_write(ctxt, addr, val, bytes, exception, &read_emultor); } int emulator_write_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *val, unsigned int bytes, struct x86_exception *exception) { return emulator_read_write(ctxt, addr, (void *)val, bytes, exception, &write_emultor); } #define CMPXCHG_TYPE(t, ptr, old, new) \ (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old)) #ifdef CONFIG_X86_64 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new) #else # define CMPXCHG64(ptr, old, new) \ (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) #endif static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, unsigned long addr, const void *old, const void *new, unsigned int bytes, struct x86_exception *exception) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); gpa_t gpa; struct page *page; char *kaddr; bool exchanged; /* guests cmpxchg8b have to be emulated atomically */ if (bytes > 8 || (bytes & (bytes - 1))) goto emul_write; gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); if (gpa == UNMAPPED_GVA || (gpa & 
PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) goto emul_write; if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK)) goto emul_write; page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); if (is_error_page(page)) goto emul_write; kaddr = kmap_atomic(page); kaddr += offset_in_page(gpa); switch (bytes) { case 1: exchanged = CMPXCHG_TYPE(u8, kaddr, old, new); break; case 2: exchanged = CMPXCHG_TYPE(u16, kaddr, old, new); break; case 4: exchanged = CMPXCHG_TYPE(u32, kaddr, old, new); break; case 8: exchanged = CMPXCHG64(kaddr, old, new); break; default: BUG(); } kunmap_atomic(kaddr); kvm_release_page_dirty(page); if (!exchanged) return X86EMUL_CMPXCHG_FAILED; kvm_mmu_pte_write(vcpu, gpa, new, bytes); return X86EMUL_CONTINUE; emul_write: printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); return emulator_write_emulated(ctxt, addr, new, bytes, exception); } static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) { /* TODO: String I/O for in kernel device */ int r; if (vcpu->arch.pio.in) r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); else r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port, vcpu->arch.pio.size, pd); return r; } static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, unsigned short port, void *val, unsigned int count, bool in) { trace_kvm_pio(!in, port, size, count); vcpu->arch.pio.port = port; vcpu->arch.pio.in = in; vcpu->arch.pio.count = count; vcpu->arch.pio.size = size; if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { vcpu->arch.pio.count = 0; return 1; } vcpu->run->exit_reason = KVM_EXIT_IO; vcpu->run->io.direction = in ? 
KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; vcpu->run->io.size = size; vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; vcpu->run->io.count = count; vcpu->run->io.port = port; return 0; } static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); int ret; if (vcpu->arch.pio.count) goto data_avail; ret = emulator_pio_in_out(vcpu, size, port, val, count, true); if (ret) { data_avail: memcpy(val, vcpu->arch.pio_data, size * count); vcpu->arch.pio.count = 0; return 1; } return 0; } static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt, int size, unsigned short port, const void *val, unsigned int count) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); memcpy(vcpu->arch.pio_data, val, size * count); return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); } static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) { return kvm_x86_ops->get_segment_base(vcpu, seg); } static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address) { kvm_mmu_invlpg(emul_to_vcpu(ctxt), address); } int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) { if (!need_emulate_wbinvd(vcpu)) return X86EMUL_CONTINUE; if (kvm_x86_ops->has_wbinvd_exit()) { int cpu = get_cpu(); cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, wbinvd_ipi, NULL, 1); put_cpu(); cpumask_clear(vcpu->arch.wbinvd_dirty_mask); } else wbinvd(); return X86EMUL_CONTINUE; } EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd); static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt) { kvm_emulate_wbinvd(emul_to_vcpu(ctxt)); } int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) { return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest); } int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) { return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value); } static u64 mk_cr_64(u64 curr_cr, u32 
new_val)
{
	/* Replace the low 32 bits of curr_cr with new_val, keep the rest. */
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

/* Emulator callback: read a control register (CR0/2/3/4/8). */
static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long value;

	switch (cr) {
	case 0:
		value = kvm_read_cr0(vcpu);
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
		value = kvm_read_cr3(vcpu);
		break;
	case 4:
		value = kvm_read_cr4(vcpu);
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
		/* Unknown CR index: log and report 0. */
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
		return 0;
	}

	return value;
}

/*
 * Emulator callback: write a control register.  For CR0/CR4 only the
 * low 32 bits are replaced (mk_cr_64 preserves the upper half).
 * Returns 0 on success, non-zero on a rejected/invalid write.
 */
static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	int res = 0;

	switch (cr) {
	case 0:
		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		res = kvm_set_cr3(vcpu, val);
		break;
	case 4:
		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
		break;
	case 8:
		res = kvm_set_cr8(vcpu, val);
		break;
	default:
		kvm_err("%s: unexpected cr %u\n", __func__, cr);
		res = -1;
	}

	return res;
}

static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val)
{
	kvm_set_rflags(emul_to_vcpu(ctxt), val);
}

/* Current privilege level, as reported by the vendor backend. */
static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
}

static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
}

static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
}

static unsigned long emulator_get_cached_segment_base(
	struct x86_emulate_ctxt *ctxt, int seg)
{
	return get_segment_base(emul_to_vcpu(ctxt), seg);
}

static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16
*selector, struct desc_struct *desc, u32 *base3, int seg) { struct kvm_segment var; kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); *selector = var.selector; if (var.unusable) { memset(desc, 0, sizeof(*desc)); return false; } if (var.g) var.limit >>= 12; set_desc_limit(desc, var.limit); set_desc_base(desc, (unsigned long)var.base); #ifdef CONFIG_X86_64 if (base3) *base3 = var.base >> 32; #endif desc->type = var.type; desc->s = var.s; desc->dpl = var.dpl; desc->p = var.present; desc->avl = var.avl; desc->l = var.l; desc->d = var.db; desc->g = var.g; return true; } static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, struct desc_struct *desc, u32 base3, int seg) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); struct kvm_segment var; var.selector = selector; var.base = get_desc_base(desc); #ifdef CONFIG_X86_64 var.base |= ((u64)base3) << 32; #endif var.limit = get_desc_limit(desc); if (desc->g) var.limit = (var.limit << 12) | 0xfff; var.type = desc->type; var.present = desc->p; var.dpl = desc->dpl; var.db = desc->d; var.s = desc->s; var.l = desc->l; var.g = desc->g; var.avl = desc->avl; var.present = desc->p; var.unusable = !var.present; var.padding = 0; kvm_set_segment(vcpu, &var, seg); return; } static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata) { return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata); } static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data) { struct msr_data msr; msr.data = data; msr.index = msr_index; msr.host_initiated = false; return kvm_set_msr(emul_to_vcpu(ctxt), &msr); } static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata) { return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata); } static void emulator_halt(struct x86_emulate_ctxt *ctxt) { emul_to_vcpu(ctxt)->arch.halt_request = 1; } static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt) { preempt_disable(); kvm_load_guest_fpu(emul_to_vcpu(ctxt)); /* * CR0.TS may 
reference the host fpu state, not the guest fpu state,
	 * so it may be clear at this point.
	 */
	clts();
}

/* Pairs with emulator_get_fpu(): re-enable preemption. */
static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
{
	preempt_enable();
}

/*
 * Emulator callback: let the vendor backend (VMX/SVM) decide whether a
 * nested hypervisor intercepts the instruction at @stage.
 */
static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
			      struct x86_instruction_info *info,
			      enum x86_intercept_stage stage)
{
	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}

/* Emulator callback: CPUID with the vcpu's configured leaves. */
static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
}

/* General-purpose register accessors for the emulator. */
static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
	return kvm_register_read(emul_to_vcpu(ctxt), reg);
}

static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt,
			       unsigned reg, ulong val)
{
	kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}

/*
 * The callback table handed to the x86 instruction emulator: every
 * memory, I/O, register, segment, MSR and control-register access the
 * emulator performs goes through these hooks.
 */
static const struct x86_emulate_ops emulate_ops = {
	.read_gpr            = emulator_read_gpr,
	.write_gpr           = emulator_write_gpr,
	.read_std            = kvm_read_guest_virt_system,
	.write_std           = kvm_write_guest_virt_system,
	.fetch               = kvm_fetch_guest_virt,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
	.invlpg              = emulator_invlpg,
	.pio_in_emulated     = emulator_pio_in_emulated,
	.pio_out_emulated    = emulator_pio_out_emulated,
	.get_segment         = emulator_get_segment,
	.set_segment         = emulator_set_segment,
	.get_cached_segment_base = emulator_get_cached_segment_base,
	.get_gdt             = emulator_get_gdt,
	.get_idt             = emulator_get_idt,
	.set_gdt             = emulator_set_gdt,
	.set_idt             = emulator_set_idt,
	.get_cr              = emulator_get_cr,
	.set_cr              = emulator_set_cr,
	.set_rflags          = emulator_set_rflags,
	.cpl                 = emulator_get_cpl,
	.get_dr              = emulator_get_dr,
	.set_dr              = emulator_set_dr,
	.set_msr             = emulator_set_msr,
	.get_msr             = emulator_get_msr,
	.read_pmc            = emulator_read_pmc,
	.halt                = emulator_halt,
	.wbinvd              = emulator_wbinvd,
	.fix_hypercall       = emulator_fix_hypercall,
	.get_fpu             = emulator_get_fpu,
	.put_fpu             = emulator_put_fpu,
	.intercept           = emulator_intercept,
	.get_cpuid           =
emulator_get_cpuid,
};

/*
 * Update the interrupt shadow (STI / MOV SS blocking) after emulation.
 * @mask is the shadow state the emulator ended with.
 */
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
	/*
	 * an sti; sti; sequence only disable interrupts for the first
	 * instruction. So, if the last instruction, be it emulated or
	 * not, left the system with the INT_STI flag enabled, it
	 * means that the last instruction is an sti. We should not
	 * leave the flag on in this case. The same goes for mov ss
	 */
	if (!(int_shadow & mask))
		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
}

/*
 * Forward an exception raised during emulation to the guest.  Page
 * faults go through kvm_propagate_fault() so nested-fault handling
 * applies; other vectors are queued with or without an error code as
 * the exception record dictates.
 */
static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	if (ctxt->exception.vector == PF_VECTOR)
		kvm_propagate_fault(vcpu, &ctxt->exception);
	else if (ctxt->exception.error_code_valid)
		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
				      ctxt->exception.error_code);
	else
		kvm_queue_exception(vcpu, ctxt->exception.vector);
}

/*
 * Reset the decoder's per-instruction scratch state.  The memset covers
 * everything from opcode_len up to (but not including) _regs, relying on
 * the field layout of struct x86_emulate_ctxt.
 */
static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->opcode_len, 0,
	       (void *)&ctxt->_regs - (void *)&ctxt->opcode_len);

	ctxt->fetch.start = 0;
	ctxt->fetch.end = 0;
	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.pos = 0;
	ctxt->mem_read.end = 0;
}

/*
 * Prime the emulation context from current vcpu state: RFLAGS, RIP and
 * the execution mode derived from protected mode / VM86 / CS.L / CS.D.
 */
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int cs_db, cs_l;

	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	ctxt->eflags = kvm_get_rflags(vcpu);
	ctxt->eip = kvm_rip_read(vcpu);
	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
		     cs_l				? X86EMUL_MODE_PROT64 :
		     cs_db				?
X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16; ctxt->guest_mode = is_guest_mode(vcpu); init_decode_cache(ctxt); vcpu->arch.emulate_regs_need_sync_from_vcpu = false; } int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) { struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; int ret; init_emulate_ctxt(vcpu); ctxt->op_bytes = 2; ctxt->ad_bytes = 2; ctxt->_eip = ctxt->eip + inc_eip; ret = emulate_int_real(ctxt, irq); if (ret != X86EMUL_CONTINUE) return EMULATE_FAIL; ctxt->eip = ctxt->_eip; kvm_rip_write(vcpu, ctxt->eip); kvm_set_rflags(vcpu, ctxt->eflags); if (irq == NMI_VECTOR) vcpu->arch.nmi_pending = 0; else vcpu->arch.interrupt.pending = false; return EMULATE_DONE; } EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt); static int handle_emulation_failure(struct kvm_vcpu *vcpu) { int r = EMULATE_DONE; ++vcpu->stat.insn_emulation_fail; trace_kvm_emulate_insn_failed(vcpu); if (!is_guest_mode(vcpu)) { vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; vcpu->run->internal.ndata = 0; r = EMULATE_FAIL; } kvm_queue_exception(vcpu, UD_VECTOR); return r; } static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, bool write_fault_to_shadow_pgtable, int emulation_type) { gpa_t gpa = cr2; pfn_t pfn; if (emulation_type & EMULTYPE_NO_REEXECUTE) return false; if (!vcpu->arch.mmu.direct_map) { /* * Write permission should be allowed since only * write access need to be emulated. */ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); /* * If the mapping is invalid in guest, let cpu retry * it to generate fault. */ if (gpa == UNMAPPED_GVA) return true; } /* * Do not retry the unhandleable instruction if it faults on the * readonly host memory, otherwise it will goto a infinite loop: * retry instruction -> write #PF -> emulation fail -> retry * instruction -> ... 
*/ pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the instruction failed on the error pfn, it can not be fixed, * report the error to userspace. */ if (is_error_noslot_pfn(pfn)) return false; kvm_release_pfn_clean(pfn); /* The instructions are well-emulated on direct mmu. */ if (vcpu->arch.mmu.direct_map) { unsigned int indirect_shadow_pages; spin_lock(&vcpu->kvm->mmu_lock); indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; spin_unlock(&vcpu->kvm->mmu_lock); if (indirect_shadow_pages) kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); return true; } /* * if emulation was due to access to shadowed page table * and it failed try to unshadow page and re-enter the * guest to let CPU execute the instruction. */ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the access faults on its page table, it can not * be fixed by unprotecting shadow page and it should * be reported to userspace. */ return !write_fault_to_shadow_pgtable; } static bool retry_instruction(struct x86_emulate_ctxt *ctxt, unsigned long cr2, int emulation_type) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); unsigned long last_retry_eip, last_retry_addr, gpa = cr2; last_retry_eip = vcpu->arch.last_retry_eip; last_retry_addr = vcpu->arch.last_retry_addr; /* * If the emulation is caused by #PF and it is non-page_table * writing instruction, it means the VM-EXIT is caused by shadow * page protected, we can zap the shadow page and retry this * instruction directly. * * Note: if the guest uses a non-page-table modifying instruction * on the PDE that points to the instruction, then we will unmap * the instruction and go to an infinite loop. So, we cache the * last retried eip and the last fault address, if we meet the eip * and the address again, we can break out of the potential infinite * loop. 
	 */
	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;

	if (!(emulation_type & EMULTYPE_RETRY))
		return false;

	/* A page-table-writing instruction cannot simply be retried. */
	if (x86_page_table_writing_insn(ctxt))
		return false;

	/* Same eip + same fault address as last time: break the loop. */
	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
		return false;

	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2;

	if (!vcpu->arch.mmu.direct_map)
		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);

	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));

	return true;
}

static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);

/*
 * Match @addr/@type against the four hardware breakpoints described by
 * @dr7 (enable bits in 1:0 pairs, R/W+LEN nibbles from bit 16) and the
 * breakpoint addresses in @db.  Returns a DR6-style hit bitmask (one bit
 * per matching breakpoint), 0 if none matched.
 */
static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
				unsigned long *db)
{
	u32 dr6 = 0;
	int i;
	u32 enable, rwlen;

	enable = dr7;
	rwlen = dr7 >> 16;
	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
			dr6 |= (1 << i);
	return dr6;
}

/*
 * After an emulated instruction completes, deliver a single-step event
 * if EFLAGS.TF was set: either exit to userspace (KVM_GUESTDBG_SINGLESTEP)
 * or inject #DB with DR6.BS into the guest.  May overwrite *@r with
 * EMULATE_USER_EXIT.
 */
static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r)
{
	struct kvm_run *kvm_run = vcpu->run;

	/*
	 * Use the "raw" value to see if TF was passed to the processor.
	 * Note that the new value of the flags has not been saved yet.
	 *
	 * This is correct even for TF set by the guest, because "the
	 * processor will not generate this exception after the instruction
	 * that sets the TF flag".
	 */
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	if (unlikely(rflags & X86_EFLAGS_TF)) {
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1;
			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
			kvm_run->debug.arch.exception = DB_VECTOR;
			kvm_run->exit_reason = KVM_EXIT_DEBUG;
			*r = EMULATE_USER_EXIT;
		} else {
			vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
			/*
			 * "Certain debug exceptions may clear bit 0-3. The
			 * remaining contents of the DR6 register are never
			 * cleared by the processor".
*/ vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= DR6_BS; kvm_queue_exception(vcpu, DB_VECTOR); } } } static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; unsigned long eip = vcpu->arch.emulate_ctxt.eip; u32 dr6 = 0; if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.guest_debug_dr7, vcpu->arch.eff_db); if (dr6 != 0) { kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; return true; } } if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.dr7, vcpu->arch.db); if (dr6 != 0) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6; kvm_queue_exception(vcpu, DB_VECTOR); *r = EMULATE_DONE; return true; } } return false; } int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2, int emulation_type, void *insn, int insn_len) { int r; struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; bool writeback = true; bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; /* * Clear write_fault_to_shadow_pgtable here to ensure it is * never reused. */ vcpu->arch.write_fault_to_shadow_pgtable = false; kvm_clear_exception_queue(vcpu); if (!(emulation_type & EMULTYPE_NO_DECODE)) { init_emulate_ctxt(vcpu); /* * We will reenter on the same instruction since * we do not set complete_userspace_io. This does not * handle watchpoints yet, those would be handled in * the emulate_ops. 
*/ if (kvm_vcpu_check_breakpoint(vcpu, &r)) return r; ctxt->interruptibility = 0; ctxt->have_exception = false; ctxt->perm_ok = false; ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; r = x86_decode_insn(ctxt, insn, insn_len); trace_kvm_emulate_insn_start(vcpu); ++vcpu->stat.insn_emulation; if (r != EMULATION_OK) { if (emulation_type & EMULTYPE_TRAP_UD) return EMULATE_FAIL; if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; if (emulation_type & EMULTYPE_SKIP) return EMULATE_FAIL; return handle_emulation_failure(vcpu); } } if (emulation_type & EMULTYPE_SKIP) { kvm_rip_write(vcpu, ctxt->_eip); return EMULATE_DONE; } if (retry_instruction(ctxt, cr2, emulation_type)) return EMULATE_DONE; /* this is needed for vmware backdoor interface to work since it changes registers values during IO operation */ if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { vcpu->arch.emulate_regs_need_sync_from_vcpu = false; emulator_invalidate_register_cache(ctxt); } restart: r = x86_emulate_insn(ctxt); if (r == EMULATION_INTERCEPTED) return EMULATE_DONE; if (r == EMULATION_FAILED) { if (reexecute_instruction(vcpu, cr2, write_fault_to_spt, emulation_type)) return EMULATE_DONE; return handle_emulation_failure(vcpu); } if (ctxt->have_exception) { inject_emulated_exception(vcpu); r = EMULATE_DONE; } else if (vcpu->arch.pio.count) { if (!vcpu->arch.pio.in) { /* FIXME: return into emulator if single-stepping. 
*/ vcpu->arch.pio.count = 0; } else { writeback = false; vcpu->arch.complete_userspace_io = complete_emulated_pio; } r = EMULATE_USER_EXIT; } else if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) writeback = false; r = EMULATE_USER_EXIT; vcpu->arch.complete_userspace_io = complete_emulated_mmio; } else if (r == EMULATION_RESTART) goto restart; else r = EMULATE_DONE; if (writeback) { toggle_interruptibility(vcpu, ctxt->interruptibility); kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; kvm_rip_write(vcpu, ctxt->eip); if (r == EMULATE_DONE) kvm_vcpu_check_singlestep(vcpu, &r); kvm_set_rflags(vcpu, ctxt->eflags); } else vcpu->arch.emulate_regs_need_sync_to_vcpu = true; return r; } EXPORT_SYMBOL_GPL(x86_emulate_instruction); int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) { unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, size, port, &val, 1); /* do not return to emulator after return from userspace */ vcpu->arch.pio.count = 0; return ret; } EXPORT_SYMBOL_GPL(kvm_fast_pio_out); static void tsc_bad(void *info) { __this_cpu_write(cpu_tsc_khz, 0); } static void tsc_khz_changed(void *data) { struct cpufreq_freqs *freq = data; unsigned long khz = 0; if (data) khz = freq->new; else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) khz = cpufreq_quick_get(raw_smp_processor_id()); if (!khz) khz = tsc_khz; __this_cpu_write(cpu_tsc_khz, khz); } static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct kvm *kvm; struct kvm_vcpu *vcpu; int i, send_ipi = 0; /* * We allow guests to temporarily run on slowing clocks, * provided we notify them after, or to run on accelerating * clocks, provided we notify them before. Thus time never * goes backwards. * * However, we have a problem. 
We can't atomically update * the frequency of a given CPU from this function; it is * merely a notifier, which can be called from any CPU. * Changing the TSC frequency at arbitrary points in time * requires a recomputation of local variables related to * the TSC for each VCPU. We must flag these local variables * to be updated and be sure the update takes place with the * new frequency before any guests proceed. * * Unfortunately, the combination of hotplug CPU and frequency * change creates an intractable locking scenario; the order * of when these callouts happen is undefined with respect to * CPU hotplug, and they can race with each other. As such, * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is * undefined; you can actually have a CPU frequency change take * place in between the computation of X and the setting of the * variable. To protect against this problem, all updates of * the per_cpu tsc_khz variable are done in an interrupt * protected IPI, and all callers wishing to update the value * must wait for a synchronous IPI to complete (which is trivial * if the caller is on the CPU already). This establishes the * necessary total order on variable updates. * * Note that because a guest time update may take place * anytime after the setting of the VCPU's request bit, the * correct TSC value must be set before the request. However, * to ensure the update actually makes it to any guest which * starts running in hardware virtualization between the set * and the acquisition of the spinlock, we must also ping the * CPU after setting the request bit. 
* */ if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) return 0; if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) return 0; smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != smp_processor_id()) send_ipi = 1; } } spin_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* * We upscale the frequency. Must make the guest * doesn't see old kvmclock values while running with * the new frequency, otherwise we risk the guest sees * time go backwards. * * In case we update the frequency for another cpu * (which might be in guest context) send an interrupt * to kick the cpu out of guest context. Next time * guest context is entered kvmclock will be updated, * so the guest will not see stale values. */ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); } return 0; } static struct notifier_block kvmclock_cpufreq_notifier_block = { .notifier_call = kvmclock_cpufreq_notifier }; static int kvmclock_cpu_notifier(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; switch (action) { case CPU_ONLINE: case CPU_DOWN_FAILED: smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); break; case CPU_DOWN_PREPARE: smp_call_function_single(cpu, tsc_bad, NULL, 1); break; } return NOTIFY_OK; } static struct notifier_block kvmclock_cpu_notifier_block = { .notifier_call = kvmclock_cpu_notifier, .priority = -INT_MAX }; static void kvm_timer_init(void) { int cpu; max_tsc_khz = tsc_khz; register_hotcpu_notifier(&kvmclock_cpu_notifier_block); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { #ifdef CONFIG_CPU_FREQ struct cpufreq_policy policy; memset(&policy, 0, sizeof(policy)); cpu = get_cpu(); cpufreq_get_policy(&policy, cpu); if (policy.cpuinfo.max_freq) max_tsc_khz = policy.cpuinfo.max_freq; put_cpu(); 
#endif cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); for_each_online_cpu(cpu) smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); } static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); int kvm_is_in_guest(void) { return __this_cpu_read(current_vcpu) != NULL; } static int kvm_is_user_mode(void) { int user_mode = 3; if (__this_cpu_read(current_vcpu)) user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); return user_mode != 0; } static unsigned long kvm_get_guest_ip(void) { unsigned long ip = 0; if (__this_cpu_read(current_vcpu)) ip = kvm_rip_read(__this_cpu_read(current_vcpu)); return ip; } static struct perf_guest_info_callbacks kvm_guest_cbs = { .is_in_guest = kvm_is_in_guest, .is_user_mode = kvm_is_user_mode, .get_guest_ip = kvm_get_guest_ip, }; void kvm_before_handle_nmi(struct kvm_vcpu *vcpu) { __this_cpu_write(current_vcpu, vcpu); } EXPORT_SYMBOL_GPL(kvm_before_handle_nmi); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu) { __this_cpu_write(current_vcpu, NULL); } EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); static void kvm_set_mmio_spte_mask(void) { u64 mask; int maxphyaddr = boot_cpu_data.x86_phys_bits; /* * Set the reserved bits and the present bit of an paging-structure * entry to generate page fault with PFER.RSV = 1. */ /* Mask the reserved physical address bits. */ mask = ((1ull << (51 - maxphyaddr + 1)) - 1) << maxphyaddr; /* Bit 62 is always reserved for 32bit host. */ mask |= 0x3ull << 62; /* Set the present bit. */ mask |= 1ull; #ifdef CONFIG_X86_64 /* * If reserved bit is not supported, clear the present bit to disable * mmio page fault. 
	 */
	if (maxphyaddr == 52)
		mask &= ~1ull;
#endif

	kvm_mmu_set_mmio_spte_mask(mask);
}

#ifdef CONFIG_X86_64
/*
 * Deferred work: the host timekeeper stopped being TSC-based, so request
 * a masterclock update on every vcpu of every VM and drop the global
 * "guests have a master clock" flag.
 */
static void pvclock_gtod_update_fn(struct work_struct *work)
{
	struct kvm *kvm;

	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		kvm_for_each_vcpu(i, vcpu, kvm)
			set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
	atomic_set(&kvm_guest_has_master_clock, 0);
	spin_unlock(&kvm_lock);
}

static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

/*
 * Notification about pvclock gtod data update.
 */
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
			       void *priv)
{
	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
	struct timekeeper *tk = priv;

	update_pvclock_gtod(tk);

	/* disable master clock if host does not trust, or does not
	 * use, TSC clocksource
	 */
	if (gtod->clock.vclock_mode != VCLOCK_TSC &&
	    atomic_read(&kvm_guest_has_master_clock) != 0)
		queue_work(system_long_wq, &pvclock_gtod_work);

	return 0;
}

static struct notifier_block pvclock_gtod_notifier = {
	.notifier_call = pvclock_gtod_notify,
};
#endif

/*
 * Module-load entry point for the x86 arch layer.  @opaque is the vendor
 * (VMX/SVM) kvm_x86_ops table.  Verifies hardware support, allocates the
 * per-cpu shared-MSR state, initializes the MMU module, MSR list, MMIO
 * SPTE mask, timers and perf callbacks.  Errors unwind via goto labels.
 */
int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_x86_ops *ops = opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	r = -ENOMEM;
	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
	if (!shared_msrs) {
		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
		goto out;
	}

	r = kvm_mmu_module_init();
	if (r)
		goto out_free_percpu;

	kvm_set_mmio_spte_mask();
	kvm_init_msr_list();

	kvm_x86_ops = ops;
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			      PT_DIRTY_MASK, PT64_NX_MASK, 0);

	kvm_timer_init();

	perf_register_guest_info_callbacks(&kvm_guest_cbs);

	if (cpu_has_xsave)
		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
kvm_lapic_init(); #ifdef CONFIG_X86_64 pvclock_gtod_register_notifier(&pvclock_gtod_notifier); #endif return 0; out_free_percpu: free_percpu(shared_msrs); out: return r; } void kvm_arch_exit(void) { perf_unregister_guest_info_callbacks(&kvm_guest_cbs); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); #ifdef CONFIG_X86_64 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); #endif kvm_x86_ops = NULL; kvm_mmu_module_exit(); free_percpu(shared_msrs); } int kvm_emulate_halt(struct kvm_vcpu *vcpu) { ++vcpu->stat.halt_exits; if (irqchip_in_kernel(vcpu->kvm)) { vcpu->arch.mp_state = KVM_MP_STATE_HALTED; return 1; } else { vcpu->run->exit_reason = KVM_EXIT_HLT; return 0; } } EXPORT_SYMBOL_GPL(kvm_emulate_halt); int kvm_hv_hypercall(struct kvm_vcpu *vcpu) { u64 param, ingpa, outgpa, ret; uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; bool fast, longmode; int cs_db, cs_l; /* * hypercall generates UD from non zero cpl and real mode * per HYPER-V spec */ if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) { kvm_queue_exception(vcpu, UD_VECTOR); return 0; } kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); longmode = is_long_mode(vcpu) && cs_l == 1; if (!longmode) { param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff); ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff); outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) | (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff); } #ifdef CONFIG_X86_64 else { param = kvm_register_read(vcpu, VCPU_REGS_RCX); ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX); outgpa = kvm_register_read(vcpu, VCPU_REGS_R8); } #endif code = param & 0xffff; fast = (param >> 16) & 0x1; rep_cnt = (param >> 32) & 0xfff; rep_idx = (param >> 48) 
& 0xfff; trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa); switch (code) { case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT: kvm_vcpu_on_spin(vcpu); break; default: res = HV_STATUS_INVALID_HYPERCALL_CODE; break; } ret = res | (((u64)rep_done & 0xfff) << 32); if (longmode) { kvm_register_write(vcpu, VCPU_REGS_RAX, ret); } else { kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32); kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff); } return 1; } /* * kvm_pv_kick_cpu_op: Kick a vcpu. * * @apicid - apicid of vcpu to be kicked. */ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) { struct kvm_lapic_irq lapic_irq; lapic_irq.shorthand = 0; lapic_irq.dest_mode = 0; lapic_irq.dest_id = apicid; lapic_irq.delivery_mode = APIC_DM_REMRD; kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL); } int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) { unsigned long nr, a0, a1, a2, a3, ret; int r = 1; if (kvm_hv_hypercall_enabled(vcpu->kvm)) return kvm_hv_hypercall(vcpu); nr = kvm_register_read(vcpu, VCPU_REGS_RAX); a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); trace_kvm_hypercall(nr, a0, a1, a2, a3); if (!is_long_mode(vcpu)) { nr &= 0xFFFFFFFF; a0 &= 0xFFFFFFFF; a1 &= 0xFFFFFFFF; a2 &= 0xFFFFFFFF; a3 &= 0xFFFFFFFF; } if (kvm_x86_ops->get_cpl(vcpu) != 0) { ret = -KVM_EPERM; goto out; } switch (nr) { case KVM_HC_VAPIC_POLL_IRQ: ret = 0; break; case KVM_HC_KICK_CPU: kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); ret = 0; break; default: ret = -KVM_ENOSYS; break; } out: kvm_register_write(vcpu, VCPU_REGS_RAX, ret); ++vcpu->stat.hypercalls; return r; } EXPORT_SYMBOL_GPL(kvm_emulate_hypercall); static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) { struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); char instruction[3]; unsigned long rip = kvm_rip_read(vcpu); kvm_x86_ops->patch_hypercall(vcpu, instruction); return 
emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
		vcpu->run->request_interrupt_window &&
		kvm_arch_interrupt_allowed(vcpu));
}

/*
 * Publish current IF, CR8, APIC base and interrupt-injection readiness
 * into the shared kvm_run area before returning to userspace.
 */
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
			kvm_arch_interrupt_allowed(vcpu) &&
			!kvm_cpu_has_interrupt(vcpu) &&
			!kvm_event_needs_reinjection(vcpu);
}

/*
 * Pin the guest page backing the virtual-APIC area (if configured) for
 * the duration of the run loop.  Returns 0 on success or when no vapic
 * is set up, -EFAULT if the guest address does not map to a page.
 *
 * NOTE(review): apic->vapic_addr is read here without apparent
 * synchronization against a concurrent KVM_SET_VAPIC_ADDR from another
 * thread — confirm locking (cf. CVE-2013-6368).
 */
static int vapic_enter(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct page *page;

	if (!apic || !apic->vapic_addr)
		return 0;

	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
	if (is_error_page(page))
		return -EFAULT;

	vcpu->arch.apic->vapic_page = page;
	return 0;
}

/*
 * Counterpart of vapic_enter(): release the pinned vapic page and mark
 * it dirty, under the kvm srcu read lock.
 */
static void vapic_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int idx;

	if (!apic || !apic->vapic_addr)
		return;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_release_page_dirty(apic->vapic_page);
	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
}

/*
 * Program the backend's CR8/TPR intercept threshold from the highest
 * pending IRR and the current TPR.  Skipped entirely when the backend
 * has no update_cr8_intercept hook or the vcpu has no in-kernel APIC;
 * with a vapic page in use, max_irr is forced to -1.
 */
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
	int max_irr, tpr;

	if (!kvm_x86_ops->update_cr8_intercept)
		return;

	if (!vcpu->arch.apic)
		return;

	if (!vcpu->arch.apic->vapic_addr)
		max_irr = kvm_lapic_find_highest_irr(vcpu);
	else
		max_irr = -1;

	if (max_irr != -1)
		max_irr >>= 4;

	tpr = kvm_lapic_get_cr8(vcpu);

	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}

static void inject_pending_event(struct kvm_vcpu *vcpu)
{
	/* try to reinject previous
events if any */ if (vcpu->arch.exception.pending) { trace_kvm_inj_exception(vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code); kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, vcpu->arch.exception.has_error_code, vcpu->arch.exception.error_code, vcpu->arch.exception.reinject); return; } if (vcpu->arch.nmi_injected) { kvm_x86_ops->set_nmi(vcpu); return; } if (vcpu->arch.interrupt.pending) { kvm_x86_ops->set_irq(vcpu); return; } /* try to inject new event if pending */ if (vcpu->arch.nmi_pending) { if (kvm_x86_ops->nmi_allowed(vcpu)) { --vcpu->arch.nmi_pending; vcpu->arch.nmi_injected = true; kvm_x86_ops->set_nmi(vcpu); } } else if (kvm_cpu_has_injectable_intr(vcpu)) { if (kvm_x86_ops->interrupt_allowed(vcpu)) { kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); kvm_x86_ops->set_irq(vcpu); } } } static void process_nmi(struct kvm_vcpu *vcpu) { unsigned limit = 2; /* * x86 is limited to one NMI running, and one NMI pending after it. * If an NMI is already in progress, limit further NMIs to just one. * Otherwise, allow two (and we'll inject the first one immediately). 
*/ if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) limit = 1; vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); kvm_make_request(KVM_REQ_EVENT, vcpu); } static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) { u64 eoi_exit_bitmap[4]; u32 tmr[8]; if (!kvm_apic_hw_enabled(vcpu->arch.apic)) return; memset(eoi_exit_bitmap, 0, 32); memset(tmr, 0, 32); kvm_ioapic_scan_entry(vcpu, eoi_exit_bitmap, tmr); kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); kvm_apic_update_tmr(vcpu, tmr); } static int vcpu_enter_guest(struct kvm_vcpu *vcpu) { int r; bool req_int_win = !irqchip_in_kernel(vcpu->kvm) && vcpu->run->request_interrupt_window; bool req_immediate_exit = false; if (vcpu->requests) { if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) kvm_mmu_unload(vcpu); if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) __kvm_migrate_timers(vcpu); if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) kvm_gen_update_masterclock(vcpu->kvm); if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) kvm_gen_kvmclock_update(vcpu); if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { r = kvm_guest_time_update(vcpu); if (unlikely(r)) goto out; } if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) kvm_mmu_sync_roots(vcpu); if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) kvm_x86_ops->tlb_flush(vcpu); if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; r = 0; goto out; } if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; r = 0; goto out; } if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) { vcpu->fpu_active = 0; kvm_x86_ops->fpu_deactivate(vcpu); } if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { /* Page is swapped out. 
	 Do synthetic halt */
			vcpu->arch.apf.halted = true;
			r = 1;
			goto out;
		}
		/* Deferred work requested by other contexts via kvm_make_request(). */
		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
			record_steal_time(vcpu);
		if (kvm_check_request(KVM_REQ_NMI, vcpu))
			process_nmi(vcpu);
		if (kvm_check_request(KVM_REQ_PMU, vcpu))
			kvm_handle_pmu_event(vcpu);
		if (kvm_check_request(KVM_REQ_PMI, vcpu))
			kvm_deliver_pmi(vcpu);
		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
			vcpu_scan_ioapic(vcpu);
	}

	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
		kvm_apic_accept_events(vcpu);
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* INIT received: back out to the blocked-state loop. */
			r = 1;
			goto out;
		}

		inject_pending_event(vcpu);

		/* enable NMI/IRQ window open exits if needed */
		if (vcpu->arch.nmi_pending)
			req_immediate_exit =
				kvm_x86_ops->enable_nmi_window(vcpu) != 0;
		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
			req_immediate_exit =
				kvm_x86_ops->enable_irq_window(vcpu) != 0;

		if (kvm_lapic_enabled(vcpu)) {
			/*
			 * Update architecture specific hints for APIC
			 * virtual interrupt delivery.
			 */
			if (kvm_x86_ops->hwapic_irr_update)
				kvm_x86_ops->hwapic_irr_update(vcpu,
					kvm_lapic_find_highest_irr(vcpu));
			update_cr8_intercept(vcpu);
			kvm_lapic_sync_to_vapic(vcpu);
		}
	}

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r)) {
		goto cancel_injection;
	}

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	if (vcpu->fpu_active)
		kvm_load_guest_fpu(vcpu);
	kvm_load_guest_xcr0(vcpu);

	vcpu->mode = IN_GUEST_MODE;

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	/* We should set ->mode before check ->requests,
	 * see the comment in make_all_cpus_request.
	 */
	smp_mb__after_srcu_read_unlock();

	local_irq_disable();

	/* Re-check for work that raced with the IN_GUEST_MODE transition. */
	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
	    || need_resched() || signal_pending(current)) {
		vcpu->mode = OUTSIDE_GUEST_MODE;
		smp_wmb();
		local_irq_enable();
		preempt_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = 1;
		goto cancel_injection;
	}

	if (req_immediate_exit)
		smp_send_reschedule(vcpu->cpu);

	kvm_guest_enter();

	/* Load guest debug registers if the guest is actively using them. */
	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
	}

	trace_kvm_entry(vcpu->vcpu_id);
	kvm_x86_ops->run(vcpu);

	/*
	 * If the guest has used debug registers, at least dr7
	 * will be disabled while returning to the host.
	 * If we don't have active breakpoints in the host, we don't
	 * care about the messed up debug address registers. But if
	 * we have some of them active, restore the old state.
	 */
	if (hw_breakpoint_active())
		hw_breakpoint_restore();

	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
							   native_read_tsc());

	vcpu->mode = OUTSIDE_GUEST_MODE;
	smp_wmb();

	/* Interrupt is enabled by handle_external_intr() */
	kvm_x86_ops->handle_external_intr(vcpu);

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
	}

	if (unlikely(vcpu->arch.tsc_always_catchup))
		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

	if (vcpu->arch.apic_attention)
		kvm_lapic_sync_from_vapic(vcpu);

	r = kvm_x86_ops->handle_exit(vcpu);
	return r;

cancel_injection:
	kvm_x86_ops->cancel_injection(vcpu);
	if (unlikely(vcpu->arch.apic_attention))
		kvm_lapic_sync_from_vapic(vcpu);
out:
	return r;
}

/*
 * Outer run loop: alternate between entering the guest (when RUNNABLE and
 * not halted by async page faults) and blocking, until an exit to
 * userspace (r <= 0) is required.  Caller must NOT hold the srcu lock;
 * this function manages vcpu->srcu_idx itself.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int r;
	struct kvm *kvm = vcpu->kvm;

	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
	r = vapic_enter(vcpu);
	if (r) {
		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
		return r;
	}

	r = 1;
	while (r > 0) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		    !vcpu->arch.apf.halted)
			r = vcpu_enter_guest(vcpu);
		else {
			/* Not runnable: drop srcu and block until woken. */
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			kvm_vcpu_block(vcpu);
			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
			if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
				kvm_apic_accept_events(vcpu);
				switch(vcpu->arch.mp_state) {
				case KVM_MP_STATE_HALTED:
					vcpu->arch.pv.pv_unhalted = false;
					vcpu->arch.mp_state =
						KVM_MP_STATE_RUNNABLE;
					/* fall through */
				case KVM_MP_STATE_RUNNABLE:
					vcpu->arch.apf.halted = false;
					break;
				case KVM_MP_STATE_INIT_RECEIVED:
					break;
				default:
					r = -EINTR;
					break;
				}
			}
		}

		if (r <= 0)
			break;

		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
		if (kvm_cpu_has_pending_timer(vcpu))
			kvm_inject_pending_timer_irqs(vcpu);

		if (dm_request_for_irq_injection(vcpu)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
		}

		kvm_check_async_pf_completion(vcpu);

		if (signal_pending(current)) {
			r = -EINTR;
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.signal_exits;
		}
		if (need_resched()) {
			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
			kvm_resched(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); } } srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); vapic_exit(vcpu); return r; } static inline int complete_emulated_io(struct kvm_vcpu *vcpu) { int r; vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); if (r != EMULATE_DONE) return 0; return 1; } static int complete_emulated_pio(struct kvm_vcpu *vcpu) { BUG_ON(!vcpu->arch.pio.count); return complete_emulated_io(vcpu); } /* * Implements the following, as a state machine: * * read: * for each fragment * for each mmio piece in the fragment * write gpa, len * exit * copy data * execute insn * * write: * for each fragment * for each mmio piece in the fragment * write gpa, len * copy data * exit */ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; struct kvm_mmio_fragment *frag; unsigned len; BUG_ON(!vcpu->mmio_needed); /* Complete previous fragment */ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; len = min(8u, frag->len); if (!vcpu->mmio_is_write) memcpy(frag->data, run->mmio.data, len); if (frag->len <= 8) { /* Switch to the next fragment. */ frag++; vcpu->mmio_cur_fragment++; } else { /* Go forward to the next mmio piece. */ frag->data += len; frag->gpa += len; frag->len -= len; } if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { vcpu->mmio_needed = 0; /* FIXME: return into emulator if single-stepping. 
*/ if (vcpu->mmio_is_write) return 1; vcpu->mmio_read_completed = 1; return complete_emulated_io(vcpu); } run->exit_reason = KVM_EXIT_MMIO; run->mmio.phys_addr = frag->gpa; if (vcpu->mmio_is_write) memcpy(run->mmio.data, frag->data, min(8u, frag->len)); run->mmio.len = min(8u, frag->len); run->mmio.is_write = vcpu->mmio_is_write; vcpu->arch.complete_userspace_io = complete_emulated_mmio; return 0; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) { int r; sigset_t sigsaved; if (!tsk_used_math(current) && init_fpu(current)) return -ENOMEM; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { kvm_vcpu_block(vcpu); kvm_apic_accept_events(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); r = -EAGAIN; goto out; } /* re-sync apic's tpr */ if (!irqchip_in_kernel(vcpu->kvm)) { if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { r = -EINVAL; goto out; } } if (unlikely(vcpu->arch.complete_userspace_io)) { int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; vcpu->arch.complete_userspace_io = NULL; r = cui(vcpu); if (r <= 0) goto out; } else WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); r = __vcpu_run(vcpu); out: post_kvm_run_save(vcpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { /* * We are here if userspace calls get_regs() in the middle of * instruction emulation. Registers state needs to be copied * back from emulation context to vcpu. 
		 Userspace shouldn't do
		 * that usually, but some bad designed PV devices (vmware
		 * backdoor interface) need this to work
		 */
		emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
	}
	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif
	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_get_rflags(vcpu);

	return 0;
}

/*
 * KVM_SET_REGS: load general-purpose registers, RIP and RFLAGS from
 * userspace into the vcpu; drops any pending exception.
 */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	/* Discard any half-synced emulator register state. */
	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;

	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_set_rflags(vcpu, regs->rflags);

	vcpu->arch.exception.pending = false;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 0;
}

/* Report the D (default operand size) and L (long mode) bits of guest CS. */
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

/*
 * KVM_GET_SREGS: copy segment registers, descriptor tables, control
 * registers, EFER, APIC base and the pending-interrupt bitmap to userspace.
 */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct desc_ptr dt;

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.size;
	sregs->idt.base = dt.address;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.size;
	sregs->gdt.base = dt.address;

	sregs->cr0 = kvm_read_cr0(vcpu);
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = kvm_read_cr3(vcpu);
	sregs->cr4 = kvm_read_cr4(vcpu);
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);

	/* Only hard (non-soft) pending interrupts are reported here. */
	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
		set_bit(vcpu->arch.interrupt.nr,
			(unsigned long *)sregs->interrupt_bitmap);

	return 0;
}

/* KVM_GET_MP_STATE: report vcpu power state, folding pv_unhalted into RUNNABLE. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	kvm_apic_accept_events(vcpu);
	if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
					vcpu->arch.pv.pv_unhalted)
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
	else
		mp_state->mp_state = vcpu->arch.mp_state;
	return 0;
}

/* KVM_SET_MP_STATE: set vcpu power state; SIPI_RECEIVED is translated
 * into INIT_RECEIVED plus a pending SIPI event. */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (!kvm_vcpu_has_lapic(vcpu) &&
	    mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
		return -EINVAL;

	if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
		vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
		set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
	} else
		vcpu->arch.mp_state = mp_state->mp_state;
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return 0;
}

/*
 * Emulate a hardware task switch via the instruction emulator.
 * Returns EMULATE_DONE on success, EMULATE_FAIL otherwise.
 */
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
		    int reason, bool has_error_code, u32 error_code)
{
	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
	int ret;

	init_emulate_ctxt(vcpu);

	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
				   has_error_code, error_code);

	if (ret)
		return EMULATE_FAIL;

	kvm_rip_write(vcpu, ctxt->eip);
	kvm_set_rflags(vcpu, ctxt->eflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);

/*
 * KVM_SET_SREGS: load segment/control registers from userspace.  Any
 * change to CR0/CR3/CR4/EFER that affects paging forces an MMU reset.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int pending_vec, max_bits, idx;
	struct desc_ptr dt;

	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
		return -EINVAL;

	dt.size = sregs->idt.limit;
	dt.address = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.size = sregs->gdt.limit;
	dt.address = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (sregs->cr4 & X86_CR4_OSXSAVE)
		kvm_update_cpuid(vcpu);

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
		/* PAE mode: reload the four page-directory pointers. */
		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
		mmu_reset_needed = 1;
	}
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = KVM_NR_INTERRUPTS;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	update_cr8_intercept(vcpu);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !is_protmode(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	kvm_make_request(KVM_REQ_EVENT, vcpu);

	return 0;
}

/*
 * KVM_SET_GUEST_DEBUG: configure hardware/software breakpoints and
 * single-stepping, optionally injecting a #DB or #BP first.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	unsigned long rflags;
	int i, r;

	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (vcpu->arch.exception.pending)
			goto out;
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
	}
	kvm_update_dr7(vcpu);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
			get_segment_base(vcpu, VCPU_SREG_CS);

	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);

	kvm_x86_ops->update_db_bp_intercept(vcpu);

	r = 0;

out:
	return r;
}

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;

	return 0;
}

/* KVM_GET_FPU: copy the guest FXSAVE image to userspace. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct i387_fxsave_struct *fxsave =
			&vcpu->arch.guest_fpu.state->fxsave;

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	return 0;
}

/* KVM_SET_FPU: load the guest FXSAVE image from userspace. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct i387_fxsave_struct *fxsave =
			&vcpu->arch.guest_fpu.state->fxsave;

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	return 0;
}

/* Allocate and initialize the guest FPU state for a new vcpu. */
int fx_init(struct kvm_vcpu *vcpu)
{
	int err;

	err = fpu_alloc(&vcpu->arch.guest_fpu);
	if (err)
		return err;

	fpu_finit(&vcpu->arch.guest_fpu);

	/*
	 * Ensure guest xcr0 is valid for loading
	 */
	vcpu->arch.xcr0 = XSTATE_FP;

	vcpu->arch.cr0 |= X86_CR0_ET;

	return 0;
}
EXPORT_SYMBOL_GPL(fx_init);

static void fx_free(struct kvm_vcpu *vcpu)
{
	fpu_free(&vcpu->arch.guest_fpu);
}

/* Switch the CPU's FPU state from host to guest (no-op if already loaded). */
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_fpu_loaded)
		return;

	/*
	 * Restore all possible states in the guest,
	 * and assume host would use all available bits.
	 * Guest xcr0 would be loaded later.
	 */
	kvm_put_guest_xcr0(vcpu);
	vcpu->guest_fpu_loaded = 1;
	__kernel_fpu_begin();
	fpu_restore_checking(&vcpu->arch.guest_fpu);
	trace_kvm_fpu(1);
}

/* Save guest FPU state and hand the FPU back to the host. */
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	kvm_put_guest_xcr0(vcpu);

	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fpu_save_init(&vcpu->arch.guest_fpu);
	__kernel_fpu_end();
	++vcpu->stat.fpu_reload;
	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
	trace_kvm_fpu(0);
}

/* Release all per-vcpu resources (clock, cpumask, FPU, backend state). */
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvmclock_reset(vcpu);

	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
						unsigned int id)
{
	/* Warn once: SMP guests cannot rely on an unstable host TSC. */
	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
		printk_once(KERN_WARNING
		"kvm: SMP vm created on host with unstable TSC; "
		"guest TSC will not be reliable\n");
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	vcpu->arch.mtrr_state.have_fixed = 1;
	r = vcpu_load(vcpu);
	if (r)
		return r;
	kvm_vcpu_reset(vcpu);
	kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);

	return r;
}

/* Final vcpu setup after creation: zero the TSC via a host-initiated write. */
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	int r;
	struct msr_data msr;

	r = vcpu_load(vcpu);
	if (r)
		return r;
	msr.data = 0x0;
	msr.index = MSR_IA32_TSC;
	msr.host_initiated = true;
	kvm_write_tsc(vcpu, &msr);
	vcpu_put(vcpu);

	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int r;
	vcpu->arch.apf.msr_val = 0;

	r = vcpu_load(vcpu);
	BUG_ON(r);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}

/* Reset architectural vcpu state (NMIs, debug regs, async PF, PMU, GPRs). */
void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.nmi_queued, 0);
	vcpu->arch.nmi_pending = 0;
	vcpu->arch.nmi_injected = false;

	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(vcpu);

	kvm_make_request(KVM_REQ_EVENT, vcpu);
	vcpu->arch.apf.msr_val = 0;
	vcpu->arch.st.msr_val = 0;

	kvmclock_reset(vcpu);

	kvm_clear_async_pf_completion_queue(vcpu);
	kvm_async_pf_hash_reset(vcpu);
	vcpu->arch.apf.halted = false;

	kvm_pmu_reset(vcpu);

	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	kvm_x86_ops->vcpu_reset(vcpu);
}

/* Point CS:IP at the SIPI vector (segment = vector << 8, IP = 0). */
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	cs.selector = vector << 8;
	cs.base = vector << 12;
	kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
	kvm_rip_write(vcpu, 0);
}

/*
 * Per-CPU hardware enable (boot, hotplug, resume).  Also detects a host
 * TSC that went backwards across suspend and compensates every vcpu.
 */
int kvm_arch_hardware_enable(void *garbage)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	int ret;
	u64 local_tsc;
	u64 max_tsc = 0;
	bool stable, backwards_tsc = false;

	kvm_shared_msr_cpu_online();
	ret = kvm_x86_ops->hardware_enable(garbage);
	if (ret != 0)
		return ret;

	local_tsc = native_read_tsc();
	stable = !check_tsc_unstable();
	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!stable && vcpu->cpu == smp_processor_id())
				set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
				backwards_tsc = true;
				if (vcpu->arch.last_host_tsc > max_tsc)
					max_tsc = vcpu->arch.last_host_tsc;
			}
		}
	}

	/*
	 * Sometimes, even reliable TSCs go backwards.  This happens on
	 * platforms that reset TSC during suspend or hibernate actions, but
	 * maintain synchronization.  We must compensate.  Fortunately, we can
	 * detect that condition here, which happens early in CPU bringup,
	 * before any KVM threads can be running.  Unfortunately, we can't
	 * bring the TSCs fully up to date with real time, as we aren't yet far
	 * enough into CPU bringup that we know how much real time has actually
	 * elapsed; our helper function, get_kernel_ns() will be using boot
	 * variables that haven't been updated yet.
	 *
	 * So we simply find the maximum observed TSC above, then record the
	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
	 * the adjustment will be applied.  Note that we accumulate
	 * adjustments, in case multiple suspend cycles happen before some VCPU
	 * gets a chance to run again.  In the event that no KVM threads get a
	 * chance to run, we will miss the entire elapsed period, as we'll have
	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
	 * loose cycle time.  This isn't too big a deal, since the loss will be
	 * uniform across all VCPUs (not to mention the scenario is extremely
	 * unlikely). It is possible that a second hibernate recovery happens
	 * much faster than a first, causing the observed TSC here to be
	 * smaller; this would require additional padding adjustment, which is
	 * why we set last_host_tsc to the local tsc observed here.
	 *
	 * N.B. - this code below runs only on platforms with reliable TSC,
	 * as that is the only way backwards_tsc is set above.  Also note
	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
	 * have the same delta_cyc adjustment applied if backwards_tsc
	 * is detected.  Note further, this adjustment is only done once,
	 * as we reset last_host_tsc on all VCPUs to stop this from being
	 * called multiple times (one for each physical CPU bringup).
	 *
	 * Platforms with unreliable TSCs don't have to deal with this, they
	 * will be compensated by the logic in vcpu_load, which sets the TSC to
	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
	 * guarantee that they stay in perfect synchronization.
	 */
	if (backwards_tsc) {
		u64 delta_cyc = max_tsc - local_tsc;
		list_for_each_entry(kvm, &vm_list, vm_list) {
			kvm_for_each_vcpu(i, vcpu, kvm) {
				vcpu->arch.tsc_offset_adjustment += delta_cyc;
				vcpu->arch.last_host_tsc = local_tsc;
				set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
					&vcpu->requests);
			}

			/*
			 * We have to disable TSC offset matching.. if you were
			 * booting a VM while issuing an S4 host suspend....
			 * you may have some problem.  Solving this issue is
			 * left as an exercise to the reader.
			 */
			kvm->arch.last_tsc_nsec = 0;
			kvm->arch.last_tsc_write = 0;
		}

	}
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
	drop_user_return_notifiers(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

/* A vcpu is compatible iff its APIC placement matches the VM's irqchip mode. */
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
{
	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
}

struct static_key kvm_no_apic_vcpu __read_mostly;

/*
 * Allocate and initialize per-vcpu state (pio page, MMU, LAPIC, MCE banks,
 * FPU, PMU).  On failure, unwinds allocations in reverse order via the
 * goto-cleanup chain.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.pv.pv_unhalted = false;
	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	kvm_set_tsc_khz(vcpu, max_tsc_khz);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	} else
		static_key_slow_inc(&kvm_no_apic_vcpu);

	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_free_lapic;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) {
		r = -ENOMEM;
		goto fail_free_mce_banks;
	}

	r = fx_init(vcpu);
	if (r)
		goto fail_free_wbinvd_dirty_mask;

	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
	vcpu->arch.pv_time_enabled = false;

	vcpu->arch.guest_supported_xcr0 = 0;
	vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	kvm_async_pf_hash_reset(vcpu);
	kvm_pmu_init(vcpu);

	return 0;
fail_free_wbinvd_dirty_mask:
	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
	kfree(vcpu->arch.mce_banks);
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

/* Tear down everything kvm_arch_vcpu_init() set up, in reverse order. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kvm_pmu_destroy(vcpu);
	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
	if (!irqchip_in_kernel(vcpu->kvm))
		static_key_slow_dec(&kvm_no_apic_vcpu);
}

/* Initialize arch-specific VM state (lists, irq source bitmap, locks, clock). */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
	atomic_set(&kvm->arch.noncoherent_dma_count, 0);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
	/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
	set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		&kvm->arch.irq_sources_bitmap);

	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
	mutex_init(&kvm->arch.apic_map_lock);
	spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);

	pvclock_update_vm_gtod_copy(kvm);

	return 0;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	int r;
	r = vcpu_load(vcpu);
	BUG_ON(r);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/*
	 * Unpin any mmu pages first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_clear_async_pf_completion_queue(vcpu);
		kvm_unload_vcpu_mmu(vcpu);
	}
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
	kvm_free_pit(kvm);
}

/* Final VM teardown: internal memslots, IOMMU, irqchips, vcpus, pinned pages. */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	if (current->mm == kvm->mm) {
		/*
		 * Free memory regions allocated on behalf of userspace,
		 * unless the the memory map has changed due to process exit
		 * or fd copying.
		 */
		struct kvm_userspace_memory_region mem;
		memset(&mem, 0, sizeof(mem));
		mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
		kvm_set_memory_region(kvm, &mem);

		mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
		kvm_set_memory_region(kvm, &mem);

		mem.slot = TSS_PRIVATE_MEMSLOT;
		kvm_set_memory_region(kvm, &mem);
	}
	kvm_iommu_unmap_guest(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
}

/* Free a memslot's rmap and large-page metadata not shared with "dont". */
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
			kvm_kvfree(free->arch.rmap[i]);
			free->arch.rmap[i] = NULL;
		}
		if (i == 0)
			continue;

		if (!dont || free->arch.lpage_info[i - 1] !=
			     dont->arch.lpage_info[i - 1]) {
			kvm_kvfree(free->arch.lpage_info[i - 1]);
			free->arch.lpage_info[i - 1] = NULL;
		}
	}
}

/*
 * Allocate rmap arrays and large-page tracking for a new memslot;
 * disables large pages on misaligned slot boundaries.
 */
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	int i;

	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		unsigned long ugfn;
		int lpages;
		int level = i + 1;

		lpages =
		 gfn_to_index(slot->base_gfn + npages - 1,
				      slot->base_gfn, level) + 1;

		slot->arch.rmap[i] =
			kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
		if (!slot->arch.rmap[i])
			goto out_free;
		if (i == 0)
			continue;

		slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
					sizeof(*slot->arch.lpage_info[i - 1]));
		if (!slot->arch.lpage_info[i - 1])
			goto out_free;

		/* Edge large pages that straddle the slot boundary are disallowed. */
		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
			slot->arch.lpage_info[i - 1][0].write_count = 1;
		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
			slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
		ugfn = slot->userspace_addr >> PAGE_SHIFT;
		/*
		 * If the gfn and userspace address are not aligned wrt each
		 * other, or if explicitly asked to, disable large page
		 * support for this slot
		 */
		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
		    !kvm_largepages_enabled()) {
			unsigned long j;

			for (j = 0; j < lpages; ++j)
				slot->arch.lpage_info[i - 1][j].write_count = 1;
		}
	}

	return 0;

out_free:
	/* Unwind every allocation made so far. */
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		kvm_kvfree(slot->arch.rmap[i]);
		slot->arch.rmap[i] = NULL;
		if (i == 0)
			continue;

		kvm_kvfree(slot->arch.lpage_info[i - 1]);
		slot->arch.lpage_info[i - 1] = NULL;
	}
	return -ENOMEM;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
	/*
	 * memslots->generation has been incremented.
	 * mmio generation may have reached its maximum value.
	 */
	kvm_mmu_invalidate_mmio_sptes(kvm);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change)
{
	/*
	 * Only private memory slots need to be mapped here since
	 * KVM_SET_MEMORY_REGION ioctl is no longer supported.
	 */
	if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
		unsigned long userspace_addr;

		/*
		 * MAP_SHARED to prevent internal slot pages from being moved
		 * by fork()/COW.
		 */
		userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
					 PROT_READ | PROT_WRITE,
					 MAP_SHARED | MAP_ANONYMOUS, 0);

		if (IS_ERR((void *)userspace_addr))
			return PTR_ERR((void *)userspace_addr);

		memslot->userspace_addr = userspace_addr;
	}

	return 0;
}

/* Commit phase of a memslot change: unmap private slots, rebalance MMU pages,
 * and write-protect pages when dirty logging is enabled. */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{

	int nr_mmu_pages = 0;

	if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
		int ret;

		ret = vm_munmap(old->userspace_addr,
				old->npages * PAGE_SIZE);
		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	if (!kvm->arch.n_requested_mmu_pages)
		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);

	if (nr_mmu_pages)
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	/*
	 * Write protect all pages for dirty logging.
	 * Existing largepage mappings are destroyed here and new ones will
	 * not be created until the end of the logging.
	 */
	if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvm_mmu_invalidate_zap_all_pages(kvm);
}

/* A vcpu is runnable if it has any reason to re-enter the guest. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
		!vcpu->arch.apf.halted)
		|| !list_empty_careful(&vcpu->async_pf.done)
		|| kvm_apic_has_events(vcpu)
		|| vcpu->arch.pv.pv_unhalted
		|| atomic_read(&vcpu->arch.nmi_queued) ||
		(kvm_arch_interrupt_allowed(vcpu) &&
		 kvm_cpu_has_interrupt(vcpu));
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

/* Compare a linear RIP (CS base + RIP) against the current one. */
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	unsigned long current_rip =
	 kvm_rip_read(vcpu) +
		get_segment_base(vcpu, VCPU_SREG_CS);

	return current_rip == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

/* Read guest RFLAGS, hiding TF while KVM itself is single-stepping. */
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

/* Write guest RFLAGS, re-adding TF if KVM is single-stepping at this RIP. */
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

/* Retry the faulting access once the async page fault has been resolved. */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	int r;

	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
	      work->wakeup_all)
		return;

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		return;

	if (!vcpu->arch.mmu.direct_map &&
	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
		return;

	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}

/* Hash a gfn into the open-addressed async-PF table. */
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}

/* Next slot in the cyclic probe sequence. */
static inline u32 kvm_async_pf_next_probe(u32 key)
{
	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}

/* Insert gfn into the first free slot (~0 marks free) via linear probing. */
static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 key = kvm_async_pf_hash_fn(gfn);

	while (vcpu->arch.apf.gfns[key] != ~0)
		key = kvm_async_pf_next_probe(key);

	vcpu->arch.apf.gfns[key] = gfn;
}

/* Find the slot holding gfn, or the free slot ending its probe chain. */
static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	int i;
	u32 key = kvm_async_pf_hash_fn(gfn);

	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
		     (vcpu->arch.apf.gfns[key] != gfn &&
		      vcpu->arch.apf.gfns[key] != ~0); i++)
		key = kvm_async_pf_next_probe(key);

	return key;
}

bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}

/*
 * Remove gfn from the open-addressed table, then re-pack the probe chain
 * so that no entry becomes unreachable (standard linear-probing deletion).
 */
static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u32 i, j, k;

	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
	while (true) {
		vcpu->arch.apf.gfns[i] = ~0;
		do {
			j = kvm_async_pf_next_probe(j);
			if (vcpu->arch.apf.gfns[j] == ~0)
				return;
			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
			/*
			 * k lies cyclically in ]i,j]
			 * |    i.k.j |
			 * |....j i.k.| or  |.k..j i...|
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
		i = j;
	}
}

/* Write an async-PF reason code into the guest's shared data area. */
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{

	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
				      sizeof(val));
}

/*
 * Notify the guest that a page is not yet present: either inject a
 * paravirt #PF carrying the token, or synthetically halt the vcpu.
 */
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);

	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
	    (vcpu->arch.apf.send_user_only &&
	     kvm_x86_ops->get_cpl(vcpu) == 0))
		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
}

/* Notify the guest that a previously-faulted page is now ready. */
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	struct x86_exception fault;

	trace_kvm_async_pf_ready(work->arch.token, work->gva);
	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);

	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
		fault.vector = PF_VECTOR;
		fault.error_code_valid = true;
		fault.error_code = 0;
		fault.nested_page_fault = false;
		fault.address = work->arch.token;
		kvm_inject_page_fault(vcpu, &fault);
	}
	vcpu->arch.apf.halted = false;
	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}

/* A "page ready" event may be injected only when a #PF can be delivered now. */
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
		return true;
	else
		return !kvm_event_needs_reinjection(vcpu) &&
			kvm_x86_ops->interrupt_allowed(vcpu);
}

void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
	atomic_inc(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);

void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
	atomic_dec(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);

bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.noncoherent_dma_count);
}
EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5794_2
crossvul-cpp_data_bad_5849_0
/* * IKEv2 parent SA creation routines * Copyright (C) 2007-2008 Michael Richardson <mcr@xelerance.com> * Copyright (C) 2008-2011 Paul Wouters <paul@xelerance.com> * Copyright (C) 2008 Antony Antony <antony@xelerance.com> * Copyright (C) 2008-2009 David McCullough <david_mccullough@securecomputing.com> * Copyright (C) 2010,2012 Avesh Agarwal <avagarwa@redhat.com> * Copyright (C) 2010 Tuomo Soini <tis@foobar.fi * Copyright (C) 2012 Paul Wouters <pwouters@redhat.com> * Copyright (C) 2012 Antony Antony <antony@phenome.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
 *
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
#include <gmp.h>

#include <libreswan.h>
#include <libreswan/ipsec_policy.h>

#include "sysdep.h"
#include "constants.h"
#include "defs.h"
#include "state.h"
#include "id.h"
#include "connections.h"

#include "crypto.h" /* requires sha1.h and md5.h */
#include "x509.h"
#include "x509more.h"
#include "ike_alg.h"
#include "kernel_alg.h"
#include "plutoalg.h"
#include "pluto_crypt.h"
#include "packet.h"
#include "demux.h"
#include "ikev2.h"
#include "log.h"
#include "spdb.h" /* for out_sa */
#include "ipsec_doi.h"
#include "vendor.h"
#include "timer.h"
#include "ike_continuations.h"
#include "cookie.h"
#include "rnd.h"
#include "pending.h"
#include "kernel.h"

/*
 * Send an IKEv2 notification, from state context if we have one, else from
 * the message digest.  NOTE(review): these expand to a brace-less if/else,
 * so a use site followed by an "else" would bind wrongly (dangling else);
 * callers in this file use them as standalone statements.
 */
#define SEND_NOTIFICATION_AA(t, d) \
	if (st) \
		send_v2_notification_from_state(st, st->st_state, t, d); \
	else \
		send_v2_notification_from_md(md, t, d);

#define SEND_NOTIFICATION(t) \
	if (st) \
		send_v2_notification_from_state(st, st->st_state, t, NULL); \
	else \
		send_v2_notification_from_md(md, t, NULL);

/* forward declarations for the parent-SA initiator path */
static void ikev2_parent_outI1_continue(struct pluto_crypto_req_cont *pcrc,
					struct pluto_crypto_req *r,
					err_t ugh);

static stf_status ikev2_parent_outI1_tail(struct pluto_crypto_req_cont *pcrc,
					  struct pluto_crypto_req *r);

static bool ikev2_get_dcookie(u_char *dcookie, chunk_t st_ni,
			      ip_address *addr, u_int8_t *spiI);

static stf_status ikev2_parent_outI1_common(struct msg_digest *md,
					    struct state *st);

static int build_ike_version();

/*
 * ***************************************************************
 *****                   PARENT_OUTI1                        *****
 * ***************************************************************
 *
 * Initiate an Oakley Main Mode exchange.
 *       HDR, SAi1, KEi, Ni   -->
 *
 * Note: this is not called from demux.c, but from ipsecdoi_initiate().
 *
 */
/*
 * Build a new parent-SA state as IKEv2 initiator and kick off the crypto
 * (nonce + KE) computation; the actual I1 packet is emitted from the
 * continuation / tail functions below.
 */
stf_status ikev2parent_outI1(int whack_sock,
			     struct connection *c,
			     struct state *predecessor,
			     lset_t policy,
			     unsigned long try,
			     enum crypto_importance importance
#ifdef HAVE_LABELED_IPSEC
			     , struct xfrm_user_sec_ctx_ike * uctx
#endif
			     )
{
	struct state *st = new_state();
	struct db_sa *sadb;
	int groupnum;
	int policy_index = POLICY_ISAKMP(policy,
					 c->spd.this.xauth_server,
					 c->spd.this.xauth_client);

	/* set up new state */
	get_cookie(TRUE, st->st_icookie, COOKIE_SIZE, &c->spd.that.host_addr);
	initialize_new_state(st, c, policy, try, whack_sock, importance);
	st->st_ikev2 = TRUE;
	change_state(st, STATE_PARENT_I1);
	st->st_msgid_lastack = INVALID_MSGID;
	st->st_msgid_nextuse = 0;
	st->st_try = try;

	if (HAS_IPSEC_POLICY(policy)) {
#ifdef HAVE_LABELED_IPSEC
		st->sec_ctx = NULL;
		if ( uctx != NULL)
			libreswan_log(
				"Labeled ipsec is not supported with ikev2 yet");
#endif

		add_pending(dup_any(
				    whack_sock), st, c, policy, 1,
			    predecessor == NULL ? SOS_NOBODY : predecessor->st_serialno
#ifdef HAVE_LABELED_IPSEC
			    , st->sec_ctx
#endif
			    );
	}

	if (predecessor == NULL)
		libreswan_log("initiating v2 parent SA");
	else
		libreswan_log("initiating v2 parent SA to replace #%lu",
			      predecessor->st_serialno);

	if (predecessor != NULL) {
		update_pending(predecessor, st);
		whack_log(RC_NEW_STATE + STATE_PARENT_I1,
			  "%s: initiate, replacing #%lu",
			  enum_name(&state_names, st->st_state),
			  predecessor->st_serialno);
	} else {
		whack_log(RC_NEW_STATE + STATE_PARENT_I1,
			  "%s: initiate",
			  enum_name(&state_names, st->st_state));
	}

	/*
	 * now, we need to initialize st->st_oakley, specifically, the group
	 * number needs to be initialized.
	 */
	groupnum = 0;

	st->st_sadb = &oakley_sadb[policy_index];
	sadb = oakley_alg_makedb(st->st_connection->alg_info_ike,
				 st->st_sadb, 0);
	if (sadb != NULL)
		st->st_sadb = sadb;
	sadb = st->st_sadb = sa_v2_convert(st->st_sadb);
	{
		unsigned int pc_cnt;

		/* look at all the proposals for the first DH transform;
		 * that group is what we will use for our KE */
		if (st->st_sadb->prop_disj != NULL) {
			for (pc_cnt = 0;
			     pc_cnt < st->st_sadb->prop_disj_cnt &&
			     groupnum == 0;
			     pc_cnt++) {
				struct db_v2_prop *vp =
					&st->st_sadb->prop_disj[pc_cnt];
				unsigned int pr_cnt;

				/* look at all the proposals */
				if (vp->props != NULL) {
					for (pr_cnt = 0;
					     pr_cnt < vp->prop_cnt &&
					     groupnum == 0;
					     pr_cnt++) {
						unsigned int ts_cnt;
						struct db_v2_prop_conj *vpc =
							&vp->props[pr_cnt];

						for (ts_cnt = 0;
						     ts_cnt < vpc->trans_cnt &&
						     groupnum == 0;
						     ts_cnt++) {
							struct db_v2_trans *tr =
								&vpc->trans[
									ts_cnt
								];
							if (tr != NULL &&
							    tr->transform_type ==
							    IKEv2_TRANS_TYPE_DH) {
								groupnum =
									tr->transid;
							}
						}
					}
				}
			}
		}
	}
	if (groupnum == 0)
		/* default DH group when the proposals named none */
		groupnum = OAKLEY_GROUP_MODP2048;
	st->st_oakley.group = lookup_group(groupnum);
	st->st_oakley.groupnum = groupnum;

	/* now.
	   we need to go calculate the nonce, and the KE */
	{
		struct ke_continuation *ke = alloc_thing(
			struct ke_continuation,
			"ikev2_outI1 KE");
		stf_status e;

		ke->md = alloc_md();
		ke->md->from_state = STATE_IKEv2_BASE;
		ke->md->svm = ikev2_parent_firststate();
		ke->md->st = st;
		set_suspended(st, ke->md);

		if (!st->st_sec_in_use) {
			/* asynchronous: helper thread computes KE+nonce and
			 * calls ikev2_parent_outI1_continue when done */
			pcrc_init(&ke->ke_pcrc);
			ke->ke_pcrc.pcrc_func = ikev2_parent_outI1_continue;
			e = build_ke(&ke->ke_pcrc, st, st->st_oakley.group,
				     importance);
			if ( (e != STF_SUSPEND && e != STF_INLINE) ||
			     (e == STF_TOOMUCHCRYPTO)) {
				loglog(RC_CRYPTOFAILED,
				       "system too busy - Enabling dcookies [TODO]");
				delete_state(st);
			}
		} else {
			/* secret already available: emit the packet now */
			e = ikev2_parent_outI1_tail(
				(struct pluto_crypto_req_cont *)ke, NULL);
		}

		reset_globals();
		return e;
	}
}

/* crypto continuation: KE+nonce are ready, build and send the I1 packet */
static void ikev2_parent_outI1_continue(struct pluto_crypto_req_cont *pcrc,
					struct pluto_crypto_req *r,
					err_t ugh)
{
	struct ke_continuation *ke = (struct ke_continuation *)pcrc;
	struct msg_digest *md = ke->md;
	struct state *const st = md->st;
	stf_status e;

	DBG(DBG_CONTROLMORE,
	    DBG_log("ikev2 parent outI1: calculated ke+nonce, sending I1"));

	if (st == NULL) {
		loglog(RC_LOG_SERIOUS,
		       "%s: Request was disconnected from state",
		       __FUNCTION__);
		if (ke->md)
			release_md(ke->md);
		return;
	}

	/* XXX should check out ugh */
	passert(ugh == NULL);
	passert(cur_state == NULL);
	passert(st != NULL);

	passert(st->st_suspended_md == ke->md);
	set_suspended(st, NULL); /* no longer connected or suspended */

	set_cur_state(st);

	st->st_calculating = FALSE;

	e = ikev2_parent_outI1_tail(pcrc, r);

	if (ke->md != NULL) {
		complete_v2_state_transition(&ke->md, e);
		if (ke->md)
			release_md(ke->md);
	}
	reset_cur_state();
	reset_globals();

	passert(GLOBALS_ARE_RESET());
}

/*
 * unpack the calculate KE value, store it in state.
 * used by IKEv2: parent, child (PFS)
 */
static int unpack_v2KE(struct state *st,
		       struct pluto_crypto_req *r,
		       chunk_t *g)
{
	struct pcr_kenonce *kn = &r->pcr_d.kn;

	unpack_KE(st, r, g);
	/* return the group the helper actually used */
	return kn->oakley_group;
}

/*
 * package up the calculate KE value, and emit it as a KE payload.
 * used by IKEv2: parent, child (PFS)
 */
static bool justship_v2KE(struct state *st UNUSED,
			  chunk_t *g, unsigned int oakley_group,
			  pb_stream *outs, u_int8_t np)
{
	struct ikev2_ke v2ke;
	pb_stream kepbs;

	memset(&v2ke, 0, sizeof(v2ke));
	v2ke.isak_np = np;
	v2ke.isak_group = oakley_group;
	if (!out_struct(&v2ke, &ikev2_ke_desc, outs, &kepbs))
		return FALSE;

	if (!out_chunk(*g, &kepbs, "ikev2 g^x"))
		return FALSE;

	close_output_pbs(&kepbs);
	return TRUE;
}

/* unpack the KE from the crypto helper reply, then emit it as a payload */
static bool ship_v2KE(struct state *st,
		      struct pluto_crypto_req *r, chunk_t *g,
		      pb_stream *outs, u_int8_t np)
{
	int oakley_group = unpack_v2KE(st, r, g);

	return justship_v2KE(st, g, oakley_group, outs, np);
}

/* store the freshly computed g^i and Ni in state, then emit the I1 packet */
static stf_status ikev2_parent_outI1_tail(struct pluto_crypto_req_cont *pcrc,
					  struct pluto_crypto_req *r)
{
	struct ke_continuation *ke = (struct ke_continuation *)pcrc;
	struct msg_digest *md = ke->md;
	struct state *const st = md->st;

	unpack_v2KE(st, r, &st->st_gi);
	unpack_nonce(&st->st_ni, r);
	return ikev2_parent_outI1_common(md, st);
}

/*
 * Serialize and transmit the IKE_SA_INIT request:
 * HDR, [N(COOKIE),] SAi1, KEi, Ni [, V].
 * Also used to retransmit I1 with a DOS cookie attached.
 */
static stf_status ikev2_parent_outI1_common(struct msg_digest *md,
					    struct state *st)
{
	struct connection *c = st->st_connection;
	int numvidtosend = 0;

	/* set up reply */
	init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer),
		 "reply packet");

	/* HDR out */
	{
		struct isakmp_hdr hdr;

		zero(&hdr); /* default to 0 */
		/* Impair function will raise major/minor by 1 for testing */
		hdr.isa_version = build_ike_version();
		if (st->st_dcookie.ptr)
			hdr.isa_np = ISAKMP_NEXT_v2N;
		else
			hdr.isa_np = ISAKMP_NEXT_v2SA;
		hdr.isa_xchg = ISAKMP_v2_SA_INIT;
		hdr.isa_flags = ISAKMP_FLAGS_I;
		memcpy(hdr.isa_icookie, st->st_icookie, COOKIE_SIZE);
		/* R-cookie, are left zero */

		if (!out_struct(&hdr, &isakmp_hdr_desc, &reply_stream,
				&md->rbody)) {
			reset_cur_state();
			return STF_INTERNAL_ERROR;
		}
	}
	/* send an anti DOS cookie, 4306 2.6, if we have received one from the
	 * responder
	 */

	if (st->st_dcookie.ptr) {
		chunk_t child_spi;
		memset(&child_spi, 0, sizeof(child_spi));
		ship_v2N(ISAKMP_NEXT_v2SA,
			 DBGP(
				 IMPAIR_SEND_BOGUS_ISAKMP_FLAG) ?
			 (ISAKMP_PAYLOAD_NONCRITICAL |
			  ISAKMP_PAYLOAD_LIBRESWAN_BOGUS) :
			 ISAKMP_PAYLOAD_NONCRITICAL,
			 PROTO_ISAKMP,
			 &child_spi,
			 v2N_COOKIE, &st->st_dcookie, &md->rbody);
	}
	/* SA out */
	{
		u_char *sa_start = md->rbody.cur;

		/* NOTE(review): condition looks odd — it converts when the
		 * disjunct list is empty OR non-NULL, i.e. almost always;
		 * presumably intended as a lazy-conversion guard. */
		if (st->st_sadb->prop_disj_cnt == 0 || st->st_sadb->prop_disj)
			st->st_sadb = sa_v2_convert(st->st_sadb);

		if (!ikev2_out_sa(&md->rbody,
				  PROTO_ISAKMP,
				  st->st_sadb,
				  st, TRUE, /* parentSA */
				  ISAKMP_NEXT_v2KE)) {
			libreswan_log("outsa fail");
			reset_cur_state();
			return STF_INTERNAL_ERROR;
		}
		/* save initiator SA for later HASH */
		if (st->st_p1isa.ptr == NULL) {
			/* no leak! (MUST be first time) */
			clonetochunk(st->st_p1isa, sa_start,
				     md->rbody.cur - sa_start,
				     "sa in main_outI1");
		}
	}

	/* send KE */
	if (!justship_v2KE(st, &st->st_gi, st->st_oakley.groupnum,
			   &md->rbody, ISAKMP_NEXT_v2Ni))
		return STF_INTERNAL_ERROR;

	/*
	 * Check which Vendor ID's we need to send - there will be more soon
	 * In IKEv2, DPD and NAT-T are no longer vendorid's
	 */
	if (c->send_vendorid) {
		numvidtosend++; /* if we need to send Libreswan VID */
	}

	/* send NONCE */
	{
		int np = numvidtosend > 0 ?
			 ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE;
		struct ikev2_generic in;
		pb_stream pb;

		memset(&in, 0, sizeof(in));
		in.isag_np = np;
		in.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL;
		if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) {
			libreswan_log(
				" setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload");
			in.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS;
		}

		if (!out_struct(&in, &ikev2_nonce_desc, &md->rbody, &pb) ||
		    !out_raw(st->st_ni.ptr, st->st_ni.len, &pb, "IKEv2 nonce"))
			return STF_INTERNAL_ERROR;
		close_output_pbs(&pb);
	}

	/* Send Vendor VID if needed */
	if (c->send_vendorid) {
		const char *myvid = ipsec_version_vendorid();
		int np = --numvidtosend > 0 ?
			 ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE;

		if (!out_generic_raw(np, &isakmp_vendor_id_desc, &md->rbody,
				     myvid, strlen(myvid), "Vendor ID"))
			return STF_INTERNAL_ERROR;

		/* ensure our VID chain was valid */
		passert(numvidtosend == 0);
	}

	close_message(&md->rbody, st);
	close_output_pbs(&reply_stream);

	freeanychunk(st->st_tpacket);
	clonetochunk(st->st_tpacket, reply_stream.start,
		     pbs_offset(&reply_stream),
		     "reply packet for ikev2_parent_outI1_tail");

	/* save packet for later signing */
	freeanychunk(st->st_firstpacket_me);
	clonetochunk(st->st_firstpacket_me, reply_stream.start,
		     pbs_offset(&reply_stream), "saved first packet");

	/* Transmit */
	send_ike_msg(st, __FUNCTION__);

	delete_event(st);
	event_schedule(EVENT_v2_RETRANSMIT, EVENT_RETRANSMIT_DELAY_0, st);

	reset_cur_state();
	return STF_OK;
}

/*
 * ***************************************************************
 *****                   PARENT_INI1                         *****
 * ***************************************************************
 *
 * Respond, as responder, to a received IKE_SA_INIT request.
 */
static void ikev2_parent_inI1outR1_continue(struct pluto_crypto_req_cont *pcrc,
					    struct pluto_crypto_req *r,
					    err_t ugh);

static stf_status ikev2_parent_inI1outR1_tail(
	struct pluto_crypto_req_cont *pcrc,
	struct pluto_crypto_req *r);

stf_status ikev2parent_inI1outR1(struct msg_digest *md)
{
	struct state *st = md->st;
	lset_t policy = POLICY_IKEV2_ALLOW;
	struct connection *c = find_host_connection(&md->iface->ip_addr,
						    md->iface->port,
						    &md->sender,
						    md->sender_port,
						    POLICY_IKEV2_ALLOW);

	/* retrieve st->st_gi */

#if 0
	if (c == NULL) {
		/*
		 * make up a policy from the thing that was proposed, and see
		 * if we can find a connection with that policy.
		 */
		pb_stream pre_sa_pbs = sa_pd->pbs;
		policy = preparse_isakmp_sa_body(&pre_sa_pbs);
		c = find_host_connection(&md->iface->ip_addr, pluto_port,
					 (ip_address*)NULL, md->sender_port,
					 policy);
	}
#endif

	if (c == NULL) {
		/* See if a wildcarded connection can be found.
		 * We cannot pick the right connection, so we're making a guess.
		 * All Road Warrior connections are fair game:
		 * we pick the first we come across (if any).
		 * If we don't find any, we pick the first opportunistic
		 * with the smallest subnet that includes the peer.
		 * There is, of course, no necessary relationship between
		 * an Initiator's address and that of its client,
		 * but Food Groups kind of assumes one.
		 */
		{
			struct connection *d;
			d = find_host_connection(&md->iface->ip_addr,
						 pluto_port,
						 (ip_address*)NULL,
						 md->sender_port, policy);

			for (; d != NULL; d = d->hp_next) {
				if (d->kind == CK_GROUP) {
					/* ignore */
				} else {
					if (d->kind == CK_TEMPLATE &&
					    !(d->policy & POLICY_OPPO)) {
						/* must be Road Warrior: we have a winner */
						c = d;
						break;
					}

					/* Opportunistic or Shunt: pick tightest match */
					if (addrinsubnet(&md->sender,
							 &d->spd.that.client) &&
					    (c == NULL ||
					     !subnetinsubnet(&c->spd.that.
							     client,
							     &d->spd.that.
							     client)))
						c = d;
				}
			}
		}
		if (c == NULL) {
			loglog(RC_LOG_SERIOUS,
			       "initial parent SA message received on %s:%u"
			       " but no connection has been authorized%s%s",
			       ip_str(
				       &md->iface->ip_addr),
			       ntohs(portof(&md->iface->ip_addr)),
			       (policy != LEMPTY) ? " with policy=" : "",
			       (policy != LEMPTY) ?
			       bitnamesof(sa_policy_bit_names, policy) : "");
			return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN;
		}
		if (c->kind != CK_TEMPLATE) {
			loglog(RC_LOG_SERIOUS,
			       "initial parent SA message received on %s:%u"
			       " but \"%s\" forbids connection",
			       ip_str(
				       &md->iface->ip_addr), pluto_port,
			       c->name);
			return STF_FAIL + v2N_NO_PROPOSAL_CHOSEN;
		}
		c = rw_instantiate(c, &md->sender, NULL, NULL);
	} else {
		/* we found a non-wildcard conn. double check if it needs instantiation anyway (eg vnet=) */
		/* vnet=/vhost= should have set CK_TEMPLATE on connection loading */
		if ((c->kind == CK_TEMPLATE) && c->spd.that.virt) {
			DBG(DBG_CONTROL,
			    DBG_log(
				    "local endpoint has virt (vnet/vhost) set without wildcards - needs instantiation"));
			c = rw_instantiate(c, &md->sender, NULL, NULL);
		} else if ((c->kind == CK_TEMPLATE) &&
			   (c->policy & POLICY_IKEV2_ALLOW_NARROWING)) {
			DBG(DBG_CONTROL,
			    DBG_log(
				    "local endpoint has narrowing=yes - needs instantiation"));
			c = rw_instantiate(c, &md->sender, NULL, NULL);
		}
	}

	DBG_log("found connection: %s\n", c ? c->name : "<none>");

	if (!st) {
		st = new_state();
		/* set up new state */
		memcpy(st->st_icookie, md->hdr.isa_icookie, COOKIE_SIZE);
		/* initialize_new_state expects valid icookie/rcookie values, so create it now */
		get_cookie(FALSE, st->st_rcookie, COOKIE_SIZE, &md->sender);
		initialize_new_state(st, c, policy, 0, NULL_FD,
				     pcim_stranger_crypto);
		st->st_ikev2 = TRUE;
		change_state(st, STATE_PARENT_R1);
		st->st_msgid_lastack = INVALID_MSGID;
		st->st_msgid_nextuse = 0;

		md->st = st;
		md->from_state = STATE_IKEv2_BASE;
	}

	/* check,as a responder, are we under dos attack or not
	 * if yes go to 6 message exchange mode. it is a config option for now.
	 * TBD set force_busy dynamically
	 * Paul: Can we check for STF_TOOMUCHCRYPTO ?
	 */
	if (force_busy == TRUE) {
		u_char dcookie[SHA1_DIGEST_SIZE];
		chunk_t dc;
		/* NOTE(review): st->st_ni is used here before any nonce has
		 * been accepted into this brand-new state — presumably empty;
		 * confirm the dcookie derivation is intended this way. */
		ikev2_get_dcookie( dcookie, st->st_ni, &md->sender,
				   st->st_icookie);
		dc.ptr = dcookie;
		dc.len = SHA1_DIGEST_SIZE;

		/* check if I1 packet contian KE and a v2N payload with type COOKIE */
		if ( md->chain[ISAKMP_NEXT_v2KE] &&
		     md->chain[ISAKMP_NEXT_v2N] &&
		     (md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type ==
		      v2N_COOKIE)) {
			u_int8_t spisize;
			const pb_stream *dc_pbs;
			chunk_t blob;
			DBG(DBG_CONTROLMORE,
			    DBG_log("received a DOS cookie in I1 verify it"));
			/* we received dcookie we send earlier verify it */
			spisize = md->chain[ISAKMP_NEXT_v2N]->payload.v2n.
				  isan_spisize;
			dc_pbs = &md->chain[ISAKMP_NEXT_v2N]->pbs;
			/* NOTE(review): spisize and pbs_left() come straight
			 * off the wire; a spisize larger than the payload
			 * would make blob.len wrap — needs bounds checking
			 * (CWE-20); confirm against upstream hardening. */
			blob.ptr = dc_pbs->cur + spisize;
			blob.len = pbs_left(dc_pbs) - spisize;
			DBG(DBG_CONTROLMORE,
			    DBG_dump_chunk("dcookie received in I1 Packet",
					   blob);
			    DBG_dump("dcookie computed", dcookie,
				     SHA1_DIGEST_SIZE));

			if (memcmp(blob.ptr, dcookie, SHA1_DIGEST_SIZE) != 0) {
				libreswan_log(
					"mismatch in DOS v2N_COOKIE,send a new one");
				SEND_NOTIFICATION_AA(v2N_COOKIE, &dc);
				return STF_FAIL + v2N_INVALID_IKE_SPI;
			}
			DBG(DBG_CONTROLMORE,
			    DBG_log("dcookie received match with computed one"));
		} else {
			/* we are under DOS attack I1 contains no DOS COOKIE */
			DBG(DBG_CONTROLMORE,
			    DBG_log(
				    "busy mode on. receieved I1 without a valid dcookie");
			    DBG_log("send a dcookie and forget this state"));
			SEND_NOTIFICATION_AA(v2N_COOKIE, &dc);
			return STF_FAIL;
		}
	} else {
		DBG(DBG_CONTROLMORE,
		    DBG_log("will not send/process a dcookie"));
	}

	/*
	 * We have to agree to the DH group before we actually know who
	 * we are talking to.   If we support the group, we use it.
	 *
	 * It is really too hard here to go through all the possible policies
	 * that might permit this group.  If we think we are being DOS'ed
	 * then we should demand a cookie.
*/ { struct ikev2_ke *ke; ke = &md->chain[ISAKMP_NEXT_v2KE]->payload.v2ke; st->st_oakley.group = lookup_group(ke->isak_group); if (st->st_oakley.group == NULL) { char fromname[ADDRTOT_BUF]; addrtot(&md->sender, 0, fromname, ADDRTOT_BUF); libreswan_log( "rejecting I1 from %s:%u, invalid DH group=%u", fromname, md->sender_port, ke->isak_group); return v2N_INVALID_KE_PAYLOAD; } } /* now. we need to go calculate the nonce, and the KE */ { struct ke_continuation *ke = alloc_thing( struct ke_continuation, "ikev2_inI1outR1 KE"); stf_status e; ke->md = md; set_suspended(st, ke->md); if (!st->st_sec_in_use) { pcrc_init(&ke->ke_pcrc); ke->ke_pcrc.pcrc_func = ikev2_parent_inI1outR1_continue; e = build_ke(&ke->ke_pcrc, st, st->st_oakley.group, pcim_stranger_crypto); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } } else { e = ikev2_parent_inI1outR1_tail((struct pluto_crypto_req_cont *)ke, NULL); } reset_globals(); return e; } } static void ikev2_parent_inI1outR1_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct ke_continuation *ke = (struct ke_continuation *)pcrc; struct msg_digest *md = ke->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inI1outR1: calculated ke+nonce, sending R1")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (ke->md) release_md(ke->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == ke->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_inI1outR1_tail(pcrc, r); if (ke->md != NULL) { complete_v2_state_transition(&ke->md, e); if (ke->md) release_md(ke->md); } reset_globals(); passert(GLOBALS_ARE_RESET()); } static stf_status ikev2_parent_inI1outR1_tail( struct pluto_crypto_req_cont 
	*pcrc,
	struct pluto_crypto_req *r)
{
	struct ke_continuation *ke = (struct ke_continuation *)pcrc;
	struct msg_digest *md = ke->md;
	/* NOTE(review): sa_pd (and later md->chain[ISAKMP_NEXT_v2KE]) are
	 * dereferenced without NULL checks; a crafted I1 that omits the SA
	 * or KE payload leaves these chain entries NULL and crashes pluto
	 * (CWE-20) — needs the same guard as the DH-group check above. */
	struct payload_digest *const sa_pd = md->chain[ISAKMP_NEXT_v2SA];
	struct state *const st = md->st;
	struct connection *c = st->st_connection;
	pb_stream *keyex_pbs;
	int numvidtosend = 0;

	if (c->send_vendorid) {
		numvidtosend++; /* we send Libreswan VID */
	}

	/* note that we don't update the state here yet */

	/* record first packet for later checking of signature */
	clonetochunk(st->st_firstpacket_him, md->message_pbs.start,
		     pbs_offset(
			     &md->message_pbs), "saved first received packet");

	/* make sure HDR is at start of a clean buffer */
	zero(reply_buffer);
	init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer),
		 "reply packet");

	/* HDR out */
	{
		struct isakmp_hdr r_hdr = md->hdr;

		memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE);
		r_hdr.isa_np = ISAKMP_NEXT_v2SA;
		/* major will be same, but their minor might be higher */
		r_hdr.isa_version = build_ike_version();
		r_hdr.isa_flags &= ~ISAKMP_FLAGS_I;
		r_hdr.isa_flags |=  ISAKMP_FLAGS_R;
		/* PAUL shouldn't we set r_hdr.isa_msgid = [htonl](st->st_msgid); here?
		 */
		if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream,
				&md->rbody))
			return STF_INTERNAL_ERROR;
	}

	/* start of SA out */
	{
		struct isakmp_sa r_sa = sa_pd->payload.sa;
		v2_notification_t rn;
		pb_stream r_sa_pbs;

		r_sa.isasa_np = ISAKMP_NEXT_v2KE; /* XXX */
		if (!out_struct(&r_sa, &ikev2_sa_desc, &md->rbody, &r_sa_pbs))
			return STF_INTERNAL_ERROR;

		/* SA body in and out */
		rn = ikev2_parse_parent_sa_body(&sa_pd->pbs,
						&sa_pd->payload.v2sa,
						&r_sa_pbs, st, FALSE);

		if (rn != v2N_NOTHING_WRONG)
			return STF_FAIL + rn;
	}

	{
		v2_notification_t rn;
		chunk_t dc;
		keyex_pbs = &md->chain[ISAKMP_NEXT_v2KE]->pbs;
		/* KE in */
		rn = accept_KE(&st->st_gi, "Gi", st->st_oakley.group,
			       keyex_pbs);
		if (rn != v2N_NOTHING_WRONG) {
			/* tell the initiator which group we insist on */
			u_int16_t group_number = htons(
				st->st_oakley.group->group);
			dc.ptr = (unsigned char *)&group_number;
			dc.len = 2;
			SEND_NOTIFICATION_AA(v2N_INVALID_KE_PAYLOAD, &dc);
			delete_state(st);
			return STF_FAIL + rn;
		}
	}

	/* Ni in */
	RETURN_STF_FAILURE(accept_v2_nonce(md, &st->st_ni, "Ni"));

	/* send KE */
	if (!ship_v2KE(st, r, &st->st_gr, &md->rbody, ISAKMP_NEXT_v2Nr))
		return STF_INTERNAL_ERROR;

	/* send NONCE */
	unpack_nonce(&st->st_nr, r);
	{
		int np = numvidtosend > 0 ?
			 ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE;
		struct ikev2_generic in;
		pb_stream pb;

		memset(&in, 0, sizeof(in));
		in.isag_np = np;
		in.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL;
		if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) {
			libreswan_log(
				" setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload");
			in.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS;
		}

		if (!out_struct(&in, &ikev2_nonce_desc, &md->rbody, &pb) ||
		    !out_raw(st->st_nr.ptr, st->st_nr.len, &pb, "IKEv2 nonce"))
			return STF_INTERNAL_ERROR;
		close_output_pbs(&pb);
	}

	/* Send VendrID if needed VID */
	if (c->send_vendorid) {
		const char *myvid = ipsec_version_vendorid();
		int np = --numvidtosend > 0 ?
			 ISAKMP_NEXT_v2V : ISAKMP_NEXT_v2NONE;

		if (!out_generic_raw(np, &isakmp_vendor_id_desc, &md->rbody,
				     myvid, strlen(myvid), "Vendor ID"))
			return STF_INTERNAL_ERROR;
	}

	close_message(&md->rbody, st);
	close_output_pbs(&reply_stream);

	/* keep it for a retransmit if necessary */
	freeanychunk(st->st_tpacket);
	clonetochunk(st->st_tpacket, reply_stream.start,
		     pbs_offset(&reply_stream),
		     "reply packet for ikev2_parent_inI1outR1_tail");

	/* save packet for later signing */
	freeanychunk(st->st_firstpacket_me);
	clonetochunk(st->st_firstpacket_me, reply_stream.start,
		     pbs_offset(&reply_stream), "saved first packet");

	/* note: retransimission is driven by initiator */

	return STF_OK;
}

/*
 * ***************************************************************
 *****                   PARENT_inR1                         *****
 * ***************************************************************
 *
 * Initiator: process the responder's IKE_SA_INIT reply, then send IKE_AUTH.
 */
static void ikev2_parent_inR1outI2_continue(struct pluto_crypto_req_cont *pcrc,
					    struct pluto_crypto_req *r,
					    err_t ugh);

static stf_status ikev2_parent_inR1outI2_tail(
	struct pluto_crypto_req_cont *pcrc,
	struct pluto_crypto_req *r);

stf_status ikev2parent_inR1outI2(struct msg_digest *md)
{
	struct state *st = md->st;
	/* struct connection *c = st->st_connection; */
	pb_stream *keyex_pbs;

	/* check if the responder replied with v2N with DOS COOKIE */
	if ( md->chain[ISAKMP_NEXT_v2N] &&
	     md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type ==
	     v2N_COOKIE) {
		u_int8_t spisize;
		const pb_stream *dc_pbs;

		DBG(DBG_CONTROLMORE,
		    DBG_log(
			    "inR1OutI2 received a DOS v2N_COOKIE from the responder");
		    DBG_log("resend the I1 with a cookie payload"));
		spisize = md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_spisize;
		dc_pbs = &md->chain[ISAKMP_NEXT_v2N]->pbs;
		/* NOTE(review): spisize is attacker-controlled; if it exceeds
		 * pbs_left() the length below wraps (CWE-20) — needs a bound
		 * check; confirm against upstream hardening. */
		clonetochunk(st->st_dcookie,  (dc_pbs->cur + spisize),
			     (pbs_left(
				      dc_pbs) - spisize),
			     "saved received dcookie");

		DBG(DBG_CONTROLMORE,
		    DBG_dump_chunk("dcookie received (instead of a R1):",
				   st->st_dcookie);
		    DBG_log("next STATE_PARENT_I1 resend I1 with the dcookie"));

		md->svm = ikev2_parent_firststate();

		/* rewind to PARENT_I1 and retransmit I1 with the cookie */
		change_state(st,
STATE_PARENT_I1); st->st_msgid_lastack = INVALID_MSGID; md->msgid_received = INVALID_MSGID; /* AAA hack */ st->st_msgid_nextuse = 0; return ikev2_parent_outI1_common(md, st); } /* * If we did not get a KE payload, we cannot continue. There * should be * a Notify telling us why. We inform the user, but continue to try this * connection via regular retransmit intervals. */ if ( md->chain[ISAKMP_NEXT_v2N] && (md->chain[ISAKMP_NEXT_v2KE] == NULL)) { const char *from_state_name = enum_name(&state_names, st->st_state); const u_int16_t isan_type = md->chain[ISAKMP_NEXT_v2N]->payload.v2n.isan_type; libreswan_log("%s: received %s", from_state_name, enum_name(&ikev2_notify_names, isan_type)); return STF_FAIL + isan_type; } else if ( md->chain[ISAKMP_NEXT_v2N]) { DBG(DBG_CONTROL, DBG_log("received a notify..")); } /* * the responder sent us back KE, Gr, Nr, and it's our time to calculate * the shared key values. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inR1: calculating g^{xy} in order to send I2")); /* KE in */ keyex_pbs = &md->chain[ISAKMP_NEXT_v2KE]->pbs; RETURN_STF_FAILURE(accept_KE(&st->st_gr, "Gr", st->st_oakley.group, keyex_pbs)); /* Ni in */ RETURN_STF_FAILURE(accept_v2_nonce(md, &st->st_nr, "Ni")); if (md->chain[ISAKMP_NEXT_v2SA] == NULL) { libreswan_log("No responder SA proposal found"); return v2N_INVALID_SYNTAX; } /* process and confirm the SA selected */ { struct payload_digest *const sa_pd = md->chain[ISAKMP_NEXT_v2SA]; v2_notification_t rn; /* SA body in and out */ rn = ikev2_parse_parent_sa_body(&sa_pd->pbs, &sa_pd->payload.v2sa, NULL, st, FALSE); if (rn != v2N_NOTHING_WRONG) return STF_FAIL + rn; } /* update state */ ikev2_update_counters(md); /* now. 
we need to go calculate the g^xy */
	{
		/* hand the DH computation off to a crypto helper; the md is
		 * parked on the state until the continuation fires. */
		struct dh_continuation *dh = alloc_thing(
			struct dh_continuation,
			"ikev2_inR1outI2 KE");
		stf_status e;

		dh->md = md;
		set_suspended(st, dh->md);

		pcrc_init(&dh->dh_pcrc);
		dh->dh_pcrc.pcrc_func = ikev2_parent_inR1outI2_continue;
		e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, INITIATOR,
				st->st_oakley.groupnum);
		if (e != STF_SUSPEND && e != STF_INLINE) {
			/* helper could not be scheduled: abandon the exchange */
			loglog(RC_CRYPTOFAILED, "system too busy");
			delete_state(st);
		}

		reset_globals();

		return e;
	}
}

/*
 * Crypto-helper continuation for the initiator's inR1outI2 step: invoked
 * once g^{xy} has been computed.  Re-attaches the parked msg_digest to the
 * state, runs the _tail function that builds and sends the I2 message, and
 * completes the state transition.
 *
 * pcrc - the dh_continuation handed to start_dh_v2 (carries the md)
 * r    - the completed crypto request (results extracted in _tail)
 * ugh  - helper error string; asserted NULL below (XXX not really handled)
 */
static void ikev2_parent_inR1outI2_continue(struct pluto_crypto_req_cont *pcrc,
					    struct pluto_crypto_req *r,
					    err_t ugh)
{
	struct dh_continuation *dh = (struct dh_continuation *)pcrc;
	struct msg_digest *md = dh->md;
	struct state *const st = md->st;
	stf_status e;

	DBG(DBG_CONTROLMORE,
	    DBG_log("ikev2 parent inR1outI2: calculating g^{xy}, sending I2"));

	if (st == NULL) {
		/* state vanished while we were computing; just drop the md */
		loglog(RC_LOG_SERIOUS,
		       "%s: Request was disconnected from state",
		       __FUNCTION__);
		if (dh->md)
			release_md(dh->md);
		return;
	}

	/* XXX should check out ugh */
	passert(ugh == NULL);
	passert(cur_state == NULL);
	passert(st != NULL);

	passert(st->st_suspended_md == dh->md);
	set_suspended(st, NULL); /* no longer connected or suspended */

	set_cur_state(st);

	st->st_calculating = FALSE;

	e = ikev2_parent_inR1outI2_tail(pcrc, r);

	if (dh->md != NULL) {
		complete_v2_state_transition(&dh->md, e);
		if (dh->md)
			release_md(dh->md);
	}
	reset_globals();

	passert(GLOBALS_ARE_RESET());
}

/*
 * Append CBC padding (plus the trailing pad-length octet) to the cleartext
 * so that its length is a multiple of the cipher block size.  Pad bytes are
 * 0,1,2,... and the last byte doubles as the pad-length octet, matching what
 * ikev2_decrypt_msg strips on receipt.
 * NOTE(review): the out_raw() return value is ignored here — presumably the
 * caller's pbs is always big enough; confirm against reply_buffer sizing.
 */
static void ikev2_padup_pre_encrypt(struct msg_digest *md,
				    pb_stream *e_pbs_cipher)
{
	struct state *st = md->st;
	struct state *pst = st;

	/* padding parameters come from the parent (IKE) SA, not a child */
	if (st->st_clonedfrom != 0)
		pst = state_with_serialno(st->st_clonedfrom);

	/* pads things up to message size boundary */
	{
		size_t blocksize = pst->st_oakley.encrypter->enc_blocksize;
		char *b = alloca(blocksize);
		unsigned int i;
		size_t padding = pad_up(pbs_offset(e_pbs_cipher), blocksize);

		if (padding == 0)
			padding = blocksize;

		for (i = 0; i < padding; i++)
			b[i] = i;
		out_raw(b, padding, e_pbs_cipher, "padding and length");
	}
}

/*
 * Reserve space in e_pbs for the truncated integrity checksum and return a
 * pointer to where it must later be written (by ikev2_encrypt_msg).
 * Returns NULL if the parent state cannot be found or the pbs is full.
 */
static unsigned char *ikev2_authloc(struct msg_digest *md,
				    pb_stream *e_pbs)
{
	unsigned char *b12;
	struct state *st = md->st;
	struct state *pst = st;

	if (st->st_clonedfrom != 0) {
		pst = state_with_serialno(st->st_clonedfrom);
		if ( pst == NULL)
			return NULL;
	}

	b12 = e_pbs->cur;
	if (!out_zero(pst->st_oakley.integ_hasher->hash_integ_len, e_pbs,
		      "length of truncated HMAC"))
		return NULL;

	return b12;
}

/*
 * Encrypt the already-padded cleartext in place and compute the truncated
 * integrity checksum over the whole message (from authstart up to authloc),
 * writing it into the slot previously reserved by ikev2_authloc.
 * Keys are taken from the parent SA; SK_ei/SK_ai when we are the initiator,
 * SK_er/SK_ar when responder.
 */
static stf_status ikev2_encrypt_msg(struct msg_digest *md,
				    enum phase1_role init,
				    unsigned char *authstart,
				    unsigned char *iv,
				    unsigned char *encstart,
				    unsigned char *authloc,
				    pb_stream *e_pbs UNUSED,
				    pb_stream *e_pbs_cipher)
{
	struct state *st = md->st;
	struct state *pst = st;
	chunk_t *cipherkey, *authkey;

	if (st->st_clonedfrom != 0)
		pst = state_with_serialno(st->st_clonedfrom);

	if (init == INITIATOR) {
		cipherkey = &pst->st_skey_ei;
		authkey = &pst->st_skey_ai;
	} else {
		cipherkey = &pst->st_skey_er;
		authkey = &pst->st_skey_ar;
	}

	/* encrypt the block */
	{
		size_t blocksize = pst->st_oakley.encrypter->enc_blocksize;
		unsigned char *savediv = alloca(blocksize);
		unsigned int cipherlen = e_pbs_cipher->cur - encstart;

		DBG(DBG_CRYPT,
		    DBG_dump("data before encryption:", encstart, cipherlen));

		/* do_crypt mutates its IV argument, so work on a copy */
		memcpy(savediv, iv, blocksize);

		/* now, encrypt */
		(st->st_oakley.encrypter->do_crypt)(encstart,
						    cipherlen,
						    cipherkey->ptr,
						    cipherkey->len,
						    savediv, TRUE);

		DBG(DBG_CRYPT,
		    DBG_dump("data after encryption:", encstart, cipherlen));
	}

	/* okay, authenticate from beginning of IV */
	{
		struct hmac_ctx ctx;
		DBG(DBG_PARSING, DBG_log("Inside authloc"));
		DBG(DBG_CRYPT,
		    DBG_dump("authkey value: ", authkey->ptr, authkey->len));
		hmac_init_chunk(&ctx, pst->st_oakley.integ_hasher, *authkey);
		DBG(DBG_PARSING, DBG_log("Inside authloc after init"));
		hmac_update(&ctx, authstart, authloc - authstart);
		DBG(DBG_PARSING, DBG_log("Inside authloc after update"));
		hmac_final(authloc, &ctx);
		DBG(DBG_PARSING, DBG_log("Inside authloc after final"));

		DBG(DBG_PARSING, {
			    DBG_dump("data being hmac:", authstart,
				     authloc - authstart);
DBG_dump("out calculated auth:", authloc, pst->st_oakley.integ_hasher-> hash_integ_len); }); } return STF_OK; } static stf_status ikev2_decrypt_msg(struct msg_digest *md, enum phase1_role init) { struct state *st = md->st; unsigned char *encend; pb_stream *e_pbs; unsigned int np; unsigned char *iv; chunk_t *cipherkey, *authkey; unsigned char *authstart; struct state *pst = st; if (st->st_clonedfrom != 0) pst = state_with_serialno(st->st_clonedfrom); if (init == INITIATOR) { cipherkey = &pst->st_skey_er; authkey = &pst->st_skey_ar; } else { cipherkey = &pst->st_skey_ei; authkey = &pst->st_skey_ai; } e_pbs = &md->chain[ISAKMP_NEXT_v2E]->pbs; np = md->chain[ISAKMP_NEXT_v2E]->payload.generic.isag_np; authstart = md->packet_pbs.start; iv = e_pbs->cur; encend = e_pbs->roof - pst->st_oakley.integ_hasher->hash_integ_len; /* start by checking authenticator */ { unsigned char *b12 = alloca( pst->st_oakley.integ_hasher->hash_digest_len); struct hmac_ctx ctx; hmac_init_chunk(&ctx, pst->st_oakley.integ_hasher, *authkey); hmac_update(&ctx, authstart, encend - authstart); hmac_final(b12, &ctx); DBG(DBG_PARSING, { DBG_dump("data being hmac:", authstart, encend - authstart); DBG_dump("R2 calculated auth:", b12, pst->st_oakley.integ_hasher-> hash_integ_len); DBG_dump("R2 provided auth:", encend, pst->st_oakley.integ_hasher-> hash_integ_len); }); /* compare first 96 bits == 12 bytes */ /* It is not always 96 bytes, it depends upon which integ algo is used*/ if (memcmp(b12, encend, pst->st_oakley.integ_hasher->hash_integ_len) != 0) { libreswan_log("R2 failed to match authenticator"); return STF_FAIL; } } DBG(DBG_PARSING, DBG_log("authenticator matched")); /* decrypt */ { size_t blocksize = pst->st_oakley.encrypter->enc_blocksize; unsigned char *encstart = iv + blocksize; unsigned int enclen = encend - encstart; unsigned int padlen; DBG(DBG_CRYPT, DBG_dump("data before decryption:", encstart, enclen)); /* now, decrypt */ (pst->st_oakley.encrypter->do_crypt)(encstart, enclen, 
cipherkey->ptr, cipherkey->len, iv, FALSE); padlen = encstart[enclen - 1]; encend = encend - padlen + 1; if (encend < encstart) { libreswan_log("invalid pad length: %u", padlen); return STF_FAIL; } DBG(DBG_CRYPT, { DBG_dump("decrypted payload:", encstart, enclen); DBG_log("striping %u bytes as pad", padlen + 1); }); init_pbs(&md->clr_pbs, encstart, enclen - (padlen + 1), "cleartext"); } { stf_status ret; ret = ikev2_process_payloads(md, &md->clr_pbs, st->st_state, np); if (ret != STF_OK) return ret; } return STF_OK; } static stf_status ikev2_send_auth(struct connection *c, struct state *st, enum phase1_role role, unsigned int np, unsigned char *idhash_out, pb_stream *outpbs) { struct ikev2_a a; pb_stream a_pbs; struct state *pst = st; if (st->st_clonedfrom != 0) pst = state_with_serialno(st->st_clonedfrom); a.isaa_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); a.isaa_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } a.isaa_np = np; if (c->policy & POLICY_RSASIG) { a.isaa_type = v2_AUTH_RSA; } else if (c->policy & POLICY_PSK) { a.isaa_type = v2_AUTH_SHARED; } else { /* what else is there?... DSS not implemented. 
*/ return STF_FAIL; } if (!out_struct(&a, &ikev2_a_desc, outpbs, &a_pbs)) return STF_INTERNAL_ERROR; if (c->policy & POLICY_RSASIG) { if (!ikev2_calculate_rsa_sha1(pst, role, idhash_out, &a_pbs)) return STF_FATAL + v2N_AUTHENTICATION_FAILED; } else if (c->policy & POLICY_PSK) { if (!ikev2_calculate_psk_auth(pst, role, idhash_out, &a_pbs)) return STF_FAIL + v2N_AUTHENTICATION_FAILED; } close_output_pbs(&a_pbs); return STF_OK; } static stf_status ikev2_parent_inR1outI2_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct msg_digest *md = dh->md; struct state *st = md->st; struct connection *c = st->st_connection; struct ikev2_generic e; unsigned char *encstart; pb_stream e_pbs, e_pbs_cipher; unsigned char *iv; int ivsize; stf_status ret; unsigned char *idhash; unsigned char *authstart; struct state *pst = st; bool send_cert = FALSE; finish_dh_v2(st, r); if (DBGP(DBG_PRIVATE) && DBGP(DBG_CRYPT)) ikev2_log_parentSA(st); pst = st; st = duplicate_state(pst); st->st_msgid = htonl(pst->st_msgid_nextuse); /* PAUL: note ordering */ insert_state(st); md->st = st; md->pst = pst; /* parent had crypto failed, replace it with rekey! 
*/ delete_event(pst); event_schedule(EVENT_SA_REPLACE, c->sa_ike_life_seconds, pst); /* need to force parent state to I2 */ change_state(pst, STATE_PARENT_I2); /* record first packet for later checking of signature */ clonetochunk(pst->st_firstpacket_him, md->message_pbs.start, pbs_offset( &md->message_pbs), "saved first received packet"); /* beginning of data going out */ authstart = reply_stream.cur; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr r_hdr = md->hdr; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_xchg = ISAKMP_v2_AUTH; r_hdr.isa_flags = ISAKMP_FLAGS_I; r_hdr.isa_msgid = st->st_msgid; memcpy(r_hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) return STF_INTERNAL_ERROR; } /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2IDi; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); e.isag_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } if (!out_struct(&e, &ikev2_e_desc, &md->rbody, &e_pbs)) return STF_INTERNAL_ERROR; /* insert IV */ iv = e_pbs.cur; ivsize = st->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_INTERNAL_ERROR; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; /* send out the IDi payload */ { struct ikev2_id r_id; pb_stream r_id_pbs; chunk_t id_b; struct hmac_ctx id_ctx; unsigned char *id_start; unsigned int id_len; hmac_init_chunk(&id_ctx, pst->st_oakley.prf_hasher, pst->st_skey_pi); build_id_payload((struct isakmp_ipsec_id 
*)&r_id, &id_b, &c->spd.this); r_id.isai_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); r_id.isai_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } { /* decide to send CERT payload */ send_cert = doi_send_ikev2_cert_thinking(st); if (send_cert) r_id.isai_np = ISAKMP_NEXT_v2CERT; else r_id.isai_np = ISAKMP_NEXT_v2AUTH; } id_start = e_pbs_cipher.cur; if (!out_struct(&r_id, &ikev2_id_desc, &e_pbs_cipher, &r_id_pbs) || !out_chunk(id_b, &r_id_pbs, "my identity")) return STF_INTERNAL_ERROR; /* HASH of ID is not done over common header */ id_start += 4; close_output_pbs(&r_id_pbs); /* calculate hash of IDi for AUTH below */ id_len = e_pbs_cipher.cur - id_start; DBG(DBG_CRYPT, DBG_dump_chunk("idhash calc pi", pst->st_skey_pi)); DBG(DBG_CRYPT, DBG_dump("idhash calc I2", id_start, id_len)); hmac_update(&id_ctx, id_start, id_len); idhash = alloca(pst->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash, &id_ctx); } /* send [CERT,] payload RFC 4306 3.6, 1.2) */ { if (send_cert) { stf_status certstat = ikev2_send_cert( st, md, INITIATOR, ISAKMP_NEXT_v2AUTH, &e_pbs_cipher); if (certstat != STF_OK) return certstat; } } /* send out the AUTH payload */ { lset_t policy; struct connection *c0 = first_pending(pst, &policy, &st->st_whack_sock); unsigned int np = (c0 ? ISAKMP_NEXT_v2SA : ISAKMP_NEXT_v2NONE); DBG(DBG_CONTROL, DBG_log(" payload after AUTH will be %s", (c0) ? "ISAKMP_NEXT_v2SA" : "ISAKMP_NEXT_v2NONE/NOTIFY")); stf_status authstat = ikev2_send_auth(c, st, INITIATOR, np, idhash, &e_pbs_cipher); if (authstat != STF_OK) return authstat; /* * now, find an eligible child SA from the pending list, and emit * SA2i, TSi and TSr and (v2N_USE_TRANSPORT_MODE notification in transport mode) for it . 
*/ if (c0) { chunk_t child_spi, notify_data; st->st_connection = c0; ikev2_emit_ipsec_sa(md, &e_pbs_cipher, ISAKMP_NEXT_v2TSi, c0, policy); st->st_ts_this = ikev2_end_to_ts(&c0->spd.this); st->st_ts_that = ikev2_end_to_ts(&c0->spd.that); ikev2_calc_emit_ts(md, &e_pbs_cipher, INITIATOR, c0, policy); if ( !(st->st_connection->policy & POLICY_TUNNEL) ) { DBG_log( "Initiator child policy is transport mode, sending v2N_USE_TRANSPORT_MODE"); memset(&child_spi, 0, sizeof(child_spi)); memset(&notify_data, 0, sizeof(notify_data)); ship_v2N(ISAKMP_NEXT_v2NONE, ISAKMP_PAYLOAD_NONCRITICAL, 0, &child_spi, v2N_USE_TRANSPORT_MODE, &notify_data, &e_pbs_cipher); } } else { libreswan_log( "no pending SAs found, PARENT SA keyed only"); } } /* * need to extend the packet so that we will know how big it is * since the length is under the integrity check */ ikev2_padup_pre_encrypt(md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { unsigned char *authloc = ikev2_authloc(md, &e_pbs); if (authloc == NULL) return STF_INTERNAL_ERROR; close_output_pbs(&e_pbs); close_output_pbs(&md->rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(md, INITIATOR, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return ret; } /* keep it for a retransmit if necessary, but on initiator * we never do that, but send_ike_msg() uses it. */ freeanychunk(pst->st_tpacket); clonetochunk(pst->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_outI1"); /* * Delete previous retransmission event. 
*/ delete_event(st); event_schedule(EVENT_v2_RETRANSMIT, EVENT_RETRANSMIT_DELAY_0, st); return STF_OK; } /* * *************************************************************** * PARENT_inI2 ***** *************************************************************** * - * * */ static void ikev2_parent_inI2outR2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh); static stf_status ikev2_parent_inI2outR2_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r); stf_status ikev2parent_inI2outR2(struct msg_digest *md) { struct state *st = md->st; /* struct connection *c = st->st_connection; */ /* * the initiator sent us an encrypted payload. We need to calculate * our g^xy, and skeyseed values, and then decrypt the payload. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inI2outR2: calculating g^{xy} in order to decrypt I2")); /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log("R2 state should receive an encrypted payload"); reset_globals(); return STF_FATAL; } /* now. 
we need to go calculate the g^xy */ { struct dh_continuation *dh = alloc_thing( struct dh_continuation, "ikev2_inI2outR2 KE"); stf_status e; dh->md = md; set_suspended(st, dh->md); pcrc_init(&dh->dh_pcrc); dh->dh_pcrc.pcrc_func = ikev2_parent_inI2outR2_continue; e = start_dh_v2(&dh->dh_pcrc, st, st->st_import, RESPONDER, st->st_oakley.groupnum); if (e != STF_SUSPEND && e != STF_INLINE) { loglog(RC_CRYPTOFAILED, "system too busy"); delete_state(st); } reset_globals(); return e; } } static void ikev2_parent_inI2outR2_continue(struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r, err_t ugh) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct msg_digest *md = dh->md; struct state *const st = md->st; stf_status e; DBG(DBG_CONTROLMORE, DBG_log("ikev2 parent inI2outR2: calculating g^{xy}, sending R2")); if (st == NULL) { loglog(RC_LOG_SERIOUS, "%s: Request was disconnected from state", __FUNCTION__); if (dh->md) release_md(dh->md); return; } /* XXX should check out ugh */ passert(ugh == NULL); passert(cur_state == NULL); passert(st != NULL); passert(st->st_suspended_md == dh->md); set_suspended(st, NULL); /* no longer connected or suspended */ set_cur_state(st); st->st_calculating = FALSE; e = ikev2_parent_inI2outR2_tail(pcrc, r); if ( e > STF_FAIL) { /* we do not send a notify because we are the initiator that could be responding to an error notification */ int v2_notify_num = e - STF_FAIL; DBG_log( "ikev2_parent_inI2outR2_tail returned STF_FAIL with %s", enum_name(&ikev2_notify_names, v2_notify_num)); } else if ( e != STF_OK) { DBG_log("ikev2_parent_inI2outR2_tail returned %s", enum_name(&stfstatus_name, e)); } if (dh->md != NULL) { complete_v2_state_transition(&dh->md, e); if (dh->md) release_md(dh->md); } reset_globals(); passert(GLOBALS_ARE_RESET()); } static stf_status ikev2_parent_inI2outR2_tail( struct pluto_crypto_req_cont *pcrc, struct pluto_crypto_req *r) { struct dh_continuation *dh = (struct dh_continuation *)pcrc; struct 
msg_digest *md = dh->md; struct state *const st = md->st; struct connection *c = st->st_connection; unsigned char *idhash_in, *idhash_out; unsigned char *authstart; unsigned int np; int v2_notify_num = 0; /* extract calculated values from r */ finish_dh_v2(st, r); if (DBGP(DBG_PRIVATE) && DBGP(DBG_CRYPT)) ikev2_log_parentSA(st); /* decrypt things. */ { stf_status ret; ret = ikev2_decrypt_msg(md, RESPONDER); if (ret != STF_OK) return ret; } /*Once the message has been decrypted, then only we can check for auth payload*/ /*check the presense of auth payload now so that it does not crash in rehash_state if auth payload has not been received*/ if (!md->chain[ISAKMP_NEXT_v2AUTH]) { libreswan_log("no authentication payload found"); return STF_FAIL; } if (!ikev2_decode_peer_id(md, RESPONDER)) return STF_FAIL + v2N_AUTHENTICATION_FAILED; { struct hmac_ctx id_ctx; const pb_stream *id_pbs = &md->chain[ISAKMP_NEXT_v2IDi]->pbs; unsigned char *idstart = id_pbs->start + 4; unsigned int idlen = pbs_room(id_pbs) - 4; hmac_init_chunk(&id_ctx, st->st_oakley.prf_hasher, st->st_skey_pi); /* calculate hash of IDi for AUTH below */ DBG(DBG_CRYPT, DBG_dump_chunk("idhash verify pi", st->st_skey_pi)); DBG(DBG_CRYPT, DBG_dump("idhash verify I2", idstart, idlen)); hmac_update(&id_ctx, idstart, idlen); idhash_in = alloca(st->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash_in, &id_ctx); } /* process CERT payload */ { if (md->chain[ISAKMP_NEXT_v2CERT]) { /* should we check if we should accept a cert payload ? 
* has_preloaded_public_key(st) */ DBG(DBG_CONTROLMORE, DBG_log( "has a v2_CERT payload going to process it ")); ikev2_decode_cert(md); } } /* process CERTREQ payload */ if (md->chain[ISAKMP_NEXT_v2CERTREQ]) { DBG(DBG_CONTROLMORE, DBG_log("has a v2CERTREQ payload going to decode it")); ikev2_decode_cr(md, &st->st_connection->requested_ca); } /* process AUTH payload now */ /* now check signature from RSA key */ switch (md->chain[ISAKMP_NEXT_v2AUTH]->payload.v2a.isaa_type) { case v2_AUTH_RSA: { stf_status authstat = ikev2_verify_rsa_sha1(st, RESPONDER, idhash_in, NULL, /* keys from DNS */ NULL, /* gateways from DNS */ &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log("RSA authentication failed"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FATAL; } break; } case v2_AUTH_SHARED: { stf_status authstat = ikev2_verify_psk_auth(st, RESPONDER, idhash_in, &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log( "PSK authentication failed AUTH mismatch!"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FATAL; } break; } default: libreswan_log("authentication method: %s not supported", enum_name(&ikev2_auth_names, md->chain[ISAKMP_NEXT_v2AUTH]->payload. v2a.isaa_type)); return STF_FATAL; } /* Is there a notify about an error ? */ if (md->chain[ISAKMP_NEXT_v2N] != NULL) { DBG(DBG_CONTROL, DBG_log( " notify payload detected, should be processed....")); } /* good. 
now create child state */ /* note: as we will switch to child state, we force the parent to the * new state now */ change_state(st, STATE_PARENT_R2); c->newest_isakmp_sa = st->st_serialno; delete_event(st); event_schedule(EVENT_SA_REPLACE, c->sa_ike_life_seconds, st); authstart = reply_stream.cur; /* send response */ { unsigned char *encstart; unsigned char *iv; unsigned int ivsize; struct ikev2_generic e; pb_stream e_pbs, e_pbs_cipher; stf_status ret; bool send_cert = FALSE; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "reply packet"); /* HDR out */ { struct isakmp_hdr r_hdr = md->hdr; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_xchg = ISAKMP_v2_AUTH; r_hdr.isa_flags = ISAKMP_FLAGS_R; memcpy(r_hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) return STF_INTERNAL_ERROR; } /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2IDr; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &md->rbody, &e_pbs)) return STF_INTERNAL_ERROR; /* insert IV */ iv = e_pbs.cur; ivsize = st->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_INTERNAL_ERROR; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; /* decide to send CERT payload before we generate IDr */ send_cert = doi_send_ikev2_cert_thinking(st); /* send out the IDr payload */ { struct ikev2_id r_id; pb_stream r_id_pbs; chunk_t id_b; struct hmac_ctx id_ctx; unsigned char *id_start; unsigned int id_len; hmac_init_chunk(&id_ctx, st->st_oakley.prf_hasher, st->st_skey_pr); build_id_payload((struct isakmp_ipsec_id *)&r_id, &id_b, &c->spd.this); 
r_id.isai_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (send_cert) r_id.isai_np = ISAKMP_NEXT_v2CERT; else r_id.isai_np = ISAKMP_NEXT_v2AUTH; id_start = e_pbs_cipher.cur; if (!out_struct(&r_id, &ikev2_id_desc, &e_pbs_cipher, &r_id_pbs) || !out_chunk(id_b, &r_id_pbs, "my identity")) return STF_INTERNAL_ERROR; close_output_pbs(&r_id_pbs); id_start += 4; /* calculate hash of IDi for AUTH below */ id_len = e_pbs_cipher.cur - id_start; DBG(DBG_CRYPT, DBG_dump_chunk("idhash calc pr", st->st_skey_pr)); DBG(DBG_CRYPT, DBG_dump("idhash calc R2", id_start, id_len)); hmac_update(&id_ctx, id_start, id_len); idhash_out = alloca( st->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash_out, &id_ctx); } DBG(DBG_CONTROLMORE, DBG_log("assembled IDr payload -- CERT next")); /* send CERT payload RFC 4306 3.6, 1.2:([CERT,] ) */ if (send_cert) { stf_status certstat = ikev2_send_cert(st, md, RESPONDER, ISAKMP_NEXT_v2AUTH, &e_pbs_cipher); if (certstat != STF_OK) return certstat; } /* authentication good, see if there is a child SA being proposed */ if (md->chain[ISAKMP_NEXT_v2SA] == NULL || md->chain[ISAKMP_NEXT_v2TSi] == NULL || md->chain[ISAKMP_NEXT_v2TSr] == NULL) { /* initiator didn't propose anything. Weird. Try unpending out end. 
*/ /* UNPEND XXX */ libreswan_log("No CHILD SA proposals received."); np = ISAKMP_NEXT_v2NONE; } else { DBG_log("CHILD SA proposals received"); libreswan_log( "PAUL: this is where we have to check the TSi/TSr"); np = ISAKMP_NEXT_v2SA; } DBG(DBG_CONTROLMORE, DBG_log("going to assemble AUTH payload")); /* now send AUTH payload */ { stf_status authstat = ikev2_send_auth(c, st, RESPONDER, np, idhash_out, &e_pbs_cipher); if (authstat != STF_OK) return authstat; } if (np == ISAKMP_NEXT_v2SA) { /* must have enough to build an CHILD_SA */ ret = ikev2_child_sa_respond(md, RESPONDER, &e_pbs_cipher); if (ret > STF_FAIL) { v2_notify_num = ret - STF_FAIL; DBG(DBG_CONTROL, DBG_log( "ikev2_child_sa_respond returned STF_FAIL with %s", enum_name(&ikev2_notify_names, v2_notify_num))); np = ISAKMP_NEXT_v2NONE; } else if (ret != STF_OK) { DBG_log("ikev2_child_sa_respond returned %s", enum_name( &stfstatus_name, ret)); np = ISAKMP_NEXT_v2NONE; } } ikev2_padup_pre_encrypt(md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { unsigned char *authloc = ikev2_authloc(md, &e_pbs); if (authloc == NULL) return STF_INTERNAL_ERROR; close_output_pbs(&e_pbs); close_output_pbs(&md->rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(md, RESPONDER, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return ret; } } /* keep it for a retransmit if necessary */ freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "reply packet for ikev2_parent_inI2outR2_tail"); /* note: retransimission is driven by initiator */ /* if the child failed, delete its state here - we sent the packet */ /* PAUL */ return STF_OK; } /* * *************************************************************** * PARENT_inR2 (I3 state) ***** *************************************************************** * - there are no cryptographic continuations, but be certain * that there will have to be DNS continuations, but they * just aren't implemented 
yet. * */ stf_status ikev2parent_inR2(struct msg_digest *md) { struct state *st = md->st; struct connection *c = st->st_connection; unsigned char *idhash_in; struct state *pst = st; if (st->st_clonedfrom != 0) pst = state_with_serialno(st->st_clonedfrom); /* * the initiator sent us an encrypted payload. We need to calculate * our g^xy, and skeyseed values, and then decrypt the payload. */ DBG(DBG_CONTROLMORE, DBG_log( "ikev2 parent inR2: calculating g^{xy} in order to decrypt I2")); /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log("R2 state should receive an encrypted payload"); return STF_FATAL; } /* decrypt things. */ { stf_status ret; ret = ikev2_decrypt_msg(md, INITIATOR); if (ret != STF_OK) return ret; } if (!ikev2_decode_peer_id(md, INITIATOR)) return STF_FAIL + v2N_AUTHENTICATION_FAILED; { struct hmac_ctx id_ctx; const pb_stream *id_pbs = &md->chain[ISAKMP_NEXT_v2IDr]->pbs; unsigned char *idstart = id_pbs->start + 4; unsigned int idlen = pbs_room(id_pbs) - 4; hmac_init_chunk(&id_ctx, pst->st_oakley.prf_hasher, pst->st_skey_pr); /* calculate hash of IDr for AUTH below */ DBG(DBG_CRYPT, DBG_dump_chunk("idhash verify pr", pst->st_skey_pr)); DBG(DBG_CRYPT, DBG_dump("idhash auth R2", idstart, idlen)); hmac_update(&id_ctx, idstart, idlen); idhash_in = alloca(pst->st_oakley.prf_hasher->hash_digest_len); hmac_final(idhash_in, &id_ctx); } if (md->chain[ISAKMP_NEXT_v2CERT]) { /* should we check if we should accept a cert payload ? 
* has_preloaded_public_key(st) */ /* in v1 code it is decode_cert(struct msg_digest *md) */ DBG(DBG_CONTROLMORE, DBG_log("has a v2_CERT payload going to decode it")); ikev2_decode_cert(md); } /* process AUTH payload */ if (!md->chain[ISAKMP_NEXT_v2AUTH]) { libreswan_log("no authentication payload found"); return STF_FAIL; } /* now check signature from RSA key */ switch (md->chain[ISAKMP_NEXT_v2AUTH]->payload.v2a.isaa_type) { case v2_AUTH_RSA: { stf_status authstat = ikev2_verify_rsa_sha1(pst, INITIATOR, idhash_in, NULL, /* keys from DNS */ NULL, /* gateways from DNS */ &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log("authentication failed"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FAIL; } break; } case v2_AUTH_SHARED: { stf_status authstat = ikev2_verify_psk_auth(pst, INITIATOR, idhash_in, &md->chain[ ISAKMP_NEXT_v2AUTH]->pbs); if (authstat != STF_OK) { libreswan_log("PSK authentication failed"); SEND_NOTIFICATION(v2N_AUTHENTICATION_FAILED); return STF_FAIL; } break; } default: libreswan_log("authentication method: %s not supported", enum_name(&ikev2_auth_names, md->chain[ISAKMP_NEXT_v2AUTH]->payload. v2a.isaa_type)); return STF_FAIL; } /* * update the parent state to make sure that it knows we have * authenticated properly. */ change_state(pst, STATE_PARENT_I3); c->newest_isakmp_sa = pst->st_serialno; /* authentication good, see if there is a child SA available */ if (md->chain[ISAKMP_NEXT_v2SA] == NULL || md->chain[ISAKMP_NEXT_v2TSi] == NULL || md->chain[ISAKMP_NEXT_v2TSr] == NULL) { /* not really anything to here... but it would be worth unpending again */ DBG(DBG_CONTROLMORE, DBG_log( "no v2SA, v2TSi or v2TSr received, not attempting to setup child SA")); DBG(DBG_CONTROLMORE, DBG_log(" Should we check for some notify?")); /* * Delete previous retransmission event. 
*/ delete_event(st); return STF_OK; } { int bestfit_n, bestfit_p, bestfit_pr; unsigned int best_tsi_i, best_tsr_i; bestfit_n = -1; bestfit_p = -1; bestfit_pr = -1; /* Check TSi/TSr http://tools.ietf.org/html/rfc5996#section-2.9 */ DBG(DBG_CONTROLMORE, DBG_log(" check narrowing - we are responding to I2")); struct payload_digest *const tsi_pd = md->chain[ISAKMP_NEXT_v2TSi]; struct payload_digest *const tsr_pd = md->chain[ISAKMP_NEXT_v2TSr]; struct traffic_selector tsi[16], tsr[16]; #if 0 bool instantiate = FALSE; ip_subnet tsi_subnet, tsr_subnet; const char *oops; #endif unsigned int tsi_n, tsr_n; tsi_n = ikev2_parse_ts(tsi_pd, tsi, 16); tsr_n = ikev2_parse_ts(tsr_pd, tsr, 16); DBG_log( "Checking TSi(%d)/TSr(%d) selectors, looking for exact match", tsi_n, tsr_n); { struct spd_route *sra; sra = &c->spd; int bfit_n = ikev2_evaluate_connection_fit(c, sra, INITIATOR, tsi, tsr, tsi_n, tsr_n); if (bfit_n > bestfit_n) { DBG(DBG_CONTROLMORE, DBG_log( "bfit_n=ikev2_evaluate_connection_fit found better fit c %s", c->name)); int bfit_p = ikev2_evaluate_connection_port_fit(c, sra, INITIATOR, tsi, tsr, tsi_n, tsr_n, &best_tsi_i, &best_tsr_i); if (bfit_p > bestfit_p) { DBG(DBG_CONTROLMORE, DBG_log( "ikev2_evaluate_connection_port_fit found better fit c %s, tsi[%d],tsr[%d]", c->name, best_tsi_i, best_tsr_i)); int bfit_pr = ikev2_evaluate_connection_protocol_fit( c, sra, INITIATOR, tsi, tsr, tsi_n, tsr_n, &best_tsi_i, &best_tsr_i); if (bfit_pr > bestfit_pr ) { DBG(DBG_CONTROLMORE, DBG_log( "ikev2_evaluate_connection_protocol_fit found better fit c %s, tsi[%d],tsr[%d]", c ->name, best_tsi_i, best_tsr_i)); bestfit_p = bfit_p; bestfit_n = bfit_n; } else { DBG(DBG_CONTROLMORE, DBG_log( "protocol range fit c %s c->name was rejected by protocol matching", c ->name)); } } } else { DBG(DBG_CONTROLMORE, DBG_log( "prefix range fit c %s c->name was rejected by port matching", c->name)); } } if ( ( bestfit_n > 0 ) && (bestfit_p > 0)) { DBG(DBG_CONTROLMORE, DBG_log( ( "found an acceptable 
TSi/TSr Traffic Selector"))); memcpy(&st->st_ts_this, &tsi[best_tsi_i], sizeof(struct traffic_selector)); memcpy(&st->st_ts_that, &tsr[best_tsr_i], sizeof(struct traffic_selector)); ikev2_print_ts(&st->st_ts_this); ikev2_print_ts(&st->st_ts_that); ip_subnet tmp_subnet_i; ip_subnet tmp_subnet_r; rangetosubnet(&st->st_ts_this.low, &st->st_ts_this.high, &tmp_subnet_i); rangetosubnet(&st->st_ts_that.low, &st->st_ts_that.high, &tmp_subnet_r); c->spd.this.client = tmp_subnet_i; c->spd.this.port = st->st_ts_this.startport; c->spd.this.protocol = st->st_ts_this.ipprotoid; setportof(htons( c->spd.this.port), &c->spd.this.host_addr); setportof(htons( c->spd.this.port), &c->spd.this.client.addr); if ( subnetishost(&c->spd.this.client) && addrinsubnet(&c->spd.this.host_addr, &c->spd.this.client)) c->spd.this.has_client = FALSE; else c->spd.this.has_client = TRUE; c->spd.that.client = tmp_subnet_r; c->spd.that.port = st->st_ts_that.startport; c->spd.that.protocol = st->st_ts_that.ipprotoid; setportof(htons( c->spd.that.port), &c->spd.that.host_addr); setportof(htons( c->spd.that.port), &c->spd.that.client.addr); if ( subnetishost(&c->spd.that.client) && addrinsubnet(&c->spd.that.host_addr, &c->spd.that.client)) c->spd.that.has_client = FALSE; else c->spd.that.has_client = TRUE; /* AAAA */ } else { DBG(DBG_CONTROLMORE, DBG_log(( "reject responder TSi/TSr Traffic Selector"))); /* prevents parent from going to I3 */ return STF_FAIL + v2N_TS_UNACCEPTABLE; } } /* end of TS check block */ { v2_notification_t rn; struct payload_digest *const sa_pd = md->chain[ISAKMP_NEXT_v2SA]; rn = ikev2_parse_child_sa_body(&sa_pd->pbs, &sa_pd->payload.v2sa, NULL, st, FALSE); if (rn != v2N_NOTHING_WRONG) return STF_FAIL + rn; } { struct payload_digest *p; for (p = md->chain[ISAKMP_NEXT_v2N]; p != NULL; p = p->next) { /* RFC 5996 */ /*Types in the range 0 - 16383 are intended for reporting errors. 
An * implementation receiving a Notify payload with one of these types * that it does not recognize in a response MUST assume that the * corresponding request has failed entirely. Unrecognized error types * in a request and status types in a request or response MUST be * ignored, and they should be logged.*/ if (enum_name(&ikev2_notify_names, p->payload.v2n.isan_type) == NULL) { if (p->payload.v2n.isan_type < v2N_INITIAL_CONTACT) return STF_FAIL + p->payload.v2n.isan_type; } if ( p->payload.v2n.isan_type == v2N_USE_TRANSPORT_MODE ) { if ( st->st_connection->policy & POLICY_TUNNEL) { /*This means we did not send v2N_USE_TRANSPORT, however responder is sending it in now (inR2), seems incorrect*/ DBG(DBG_CONTROLMORE, DBG_log( "Initiator policy is tunnel, responder sends v2N_USE_TRANSPORT_MODE notification in inR2, ignoring it")); } else { DBG(DBG_CONTROLMORE, DBG_log( "Initiator policy is transport, responder sends v2N_USE_TRANSPORT_MODE, setting CHILD SA to transport mode")); if (st->st_esp.present == TRUE) { /*libreswan supports only "esp" with ikev2 it seems, look at ikev2_parse_child_sa_body handling*/ st->st_esp.attrs.encapsulation = ENCAPSULATION_MODE_TRANSPORT; } } } } /* for */ } /* notification block */ ikev2_derive_child_keys(st, md->role); c->newest_ipsec_sa = st->st_serialno; /* now install child SAs */ if (!install_ipsec_sa(st, TRUE)) return STF_FATAL; /* * Delete previous retransmission event. */ delete_event(st); return STF_OK; } /* * Cookie = <VersionIDofSecret> | Hash(Ni | IPi | SPIi | <secret>) * where <secret> is a randomly generated secret known only to the * in LSW implementation <VersionIDofSecret> is not used. 
*/ static bool ikev2_get_dcookie(u_char *dcookie, chunk_t st_ni, ip_address *addr, u_int8_t *spiI) { size_t addr_length; SHA1_CTX ctx_sha1; unsigned char addr_buff[ sizeof(union { struct in_addr A; struct in6_addr B; })]; addr_length = addrbytesof(addr, addr_buff, sizeof(addr_buff)); SHA1Init(&ctx_sha1); SHA1Update(&ctx_sha1, st_ni.ptr, st_ni.len); SHA1Update(&ctx_sha1, addr_buff, addr_length); SHA1Update(&ctx_sha1, spiI, sizeof(*spiI)); SHA1Update(&ctx_sha1, ikev2_secret_of_the_day, SHA1_DIGEST_SIZE); SHA1Final(dcookie, &ctx_sha1); DBG(DBG_PRIVATE, DBG_log("ikev2 secret_of_the_day used %s, length %d", ikev2_secret_of_the_day, SHA1_DIGEST_SIZE); ); DBG(DBG_CRYPT, DBG_dump("computed dcookie: HASH(Ni | IPi | SPIi | <secret>)", dcookie, SHA1_DIGEST_SIZE)); #if 0 ikev2_secrets_recycle++; if (ikev2_secrets_recycle >= 32768) { /* handed out too many cookies, cycle secrets */ ikev2_secrets_recycle = 0; /* can we call init_secrets() without adding an EVENT? */ init_secrets(); } #endif return TRUE; } /* * *************************************************************** * NOTIFICATION_OUT Complete packet ***** *************************************************************** * */ void send_v2_notification(struct state *p1st, u_int16_t type, struct state *encst, u_char *icookie, u_char *rcookie, chunk_t *n_data) { u_char buffer[1024]; pb_stream reply; pb_stream rbody; chunk_t child_spi, notify_data; /* this function is not generic enough yet just enough for 6msg * TBD accept HDR FLAGS as arg. default ISAKMP_FLAGS_R * TBD when there is a child SA use that SPI in the notify paylod. * TBD support encrypted notifications payloads. * TBD accept Critical bit as an argument. default is set. * TBD accept exchange type as an arg, default is ISAKMP_v2_SA_INIT * do we need to send a notify with empty data? * do we need to support more Protocol ID? more than PROTO_ISAKMP */ libreswan_log("sending %s notification %s to %s:%u", encst ? 
"encrypted " : "", enum_name(&ikev2_notify_names, type), ip_str(&p1st->st_remoteaddr), p1st->st_remoteport); #if 0 /* Empty notification data section should be fine? */ if (n_data == NULL) { DBG(DBG_CONTROLMORE, DBG_log("don't send packet when notification data empty")); return; } #endif memset(buffer, 0, sizeof(buffer)); init_pbs(&reply, buffer, sizeof(buffer), "notification msg"); /* HDR out */ { struct isakmp_hdr n_hdr; zero(&n_hdr); /* default to 0 */ /* AAA should we copy from MD? */ /* Impair function will raise major/minor by 1 for testing */ n_hdr.isa_version = build_ike_version(); memcpy(n_hdr.isa_rcookie, rcookie, COOKIE_SIZE); memcpy(n_hdr.isa_icookie, icookie, COOKIE_SIZE); n_hdr.isa_xchg = ISAKMP_v2_SA_INIT; n_hdr.isa_np = ISAKMP_NEXT_v2N; n_hdr.isa_flags &= ~ISAKMP_FLAGS_I; n_hdr.isa_flags |= ISAKMP_FLAGS_R; #warning check msgid code here /* PAUL: shouldn't we set n_hdr.isa_msgid = [htonl](p1st->st_msgid); */ if (!out_struct(&n_hdr, &isakmp_hdr_desc, &reply, &rbody)) { libreswan_log( "error initializing hdr for notify message"); return; } } child_spi.ptr = NULL; child_spi.len = 0; /* build and add v2N payload to the packet */ memset(&child_spi, 0, sizeof(child_spi)); memset(&notify_data, 0, sizeof(notify_data)); ship_v2N(ISAKMP_NEXT_v2NONE, DBGP( IMPAIR_SEND_BOGUS_ISAKMP_FLAG) ? 
(ISAKMP_PAYLOAD_NONCRITICAL | ISAKMP_PAYLOAD_LIBRESWAN_BOGUS) : ISAKMP_PAYLOAD_NONCRITICAL, PROTO_ISAKMP, &child_spi, type, n_data, &rbody); close_message(&rbody, p1st); close_output_pbs(&reply); clonetochunk(p1st->st_tpacket, reply.start, pbs_offset(&reply), "notification packet"); send_ike_msg(p1st, __FUNCTION__); } /* add notify payload to the rbody */ bool ship_v2N(unsigned int np, u_int8_t critical, u_int8_t protoid, chunk_t *spi, u_int16_t type, chunk_t *n_data, pb_stream *rbody) { struct ikev2_notify n; pb_stream n_pbs; DBG(DBG_CONTROLMORE, DBG_log("Adding a v2N Payload")); n.isan_np = np; n.isan_critical = critical; if (DBGP(IMPAIR_SEND_BOGUS_ISAKMP_FLAG)) { libreswan_log( " setting bogus ISAKMP_PAYLOAD_LIBRESWAN_BOGUS flag in ISAKMP payload"); n.isan_critical |= ISAKMP_PAYLOAD_LIBRESWAN_BOGUS; } n.isan_protoid = protoid; n.isan_spisize = spi->len; n.isan_type = type; if (!out_struct(&n, &ikev2_notify_desc, rbody, &n_pbs)) { libreswan_log( "error initializing notify payload for notify message"); return FALSE; } if (spi->len > 0) { if (!out_raw(spi->ptr, spi->len, &n_pbs, "SPI ")) { libreswan_log("error writing SPI to notify payload"); return FALSE; } } if (n_data != NULL) { if (!out_raw(n_data->ptr, n_data->len, &n_pbs, "Notify data")) { libreswan_log( "error writing notify payload for notify message"); return FALSE; } } close_output_pbs(&n_pbs); return TRUE; } /* * *************************************************************** * INFORMATIONAL ***** *************************************************************** * - * * */ stf_status process_informational_ikev2(struct msg_digest *md) { /* verify that there is in fact an encrypted payload */ if (!md->chain[ISAKMP_NEXT_v2E]) { libreswan_log( "Ignoring informational exchange outside encrypted payload (rfc5996 section 1.4)"); return STF_IGNORE; } /* decrypt things. 
*/ { stf_status ret; if (md->hdr.isa_flags & ISAKMP_FLAGS_I) { DBG(DBG_CONTROLMORE, DBG_log( "received informational exchange request from INITIATOR")); ret = ikev2_decrypt_msg(md, RESPONDER); } else { DBG(DBG_CONTROLMORE, DBG_log( "received informational exchange request from RESPONDER")); ret = ikev2_decrypt_msg(md, INITIATOR); } if (ret != STF_OK) return ret; } { struct payload_digest *p; struct ikev2_delete *v2del = NULL; stf_status ret; struct state *const st = md->st; /* Only send response if it is request*/ if (!(md->hdr.isa_flags & ISAKMP_FLAGS_R)) { unsigned char *authstart; pb_stream e_pbs, e_pbs_cipher; struct ikev2_generic e; unsigned char *iv; int ivsize; unsigned char *encstart; /* beginning of data going out */ authstart = reply_stream.cur; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "information exchange reply packet"); DBG(DBG_CONTROLMORE | DBG_DPD, DBG_log("Received an INFORMATIONAL request, " "updating liveness, no longer pending")); st->st_last_liveness = now(); st->st_pend_liveness = FALSE; /* HDR out */ { struct isakmp_hdr r_hdr; zero(&r_hdr); /* default to 0 */ /* AAA should we copy from MD? 
*/ r_hdr.isa_version = build_ike_version(); memcpy(r_hdr.isa_rcookie, st->st_rcookie, COOKIE_SIZE); memcpy(r_hdr.isa_icookie, st->st_icookie, COOKIE_SIZE); r_hdr.isa_xchg = ISAKMP_v2_INFORMATIONAL; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_msgid = htonl(md->msgid_received); /*set initiator bit if we are initiator*/ if (md->role == INITIATOR) r_hdr.isa_flags |= ISAKMP_FLAGS_I; r_hdr.isa_flags |= ISAKMP_FLAGS_R; if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &md->rbody)) { libreswan_log( "error initializing hdr for informational message"); return STF_INTERNAL_ERROR; } } /*HDR Done*/ /* insert an Encryption payload header */ if (md->chain[ISAKMP_NEXT_v2D]) { bool ikesa_flag = FALSE; /* Search if there is a IKE SA delete payload*/ for (p = md->chain[ISAKMP_NEXT_v2D]; p != NULL; p = p->next) { if (p->payload.v2delete.isad_protoid == PROTO_ISAKMP) { e.isag_np = ISAKMP_NEXT_v2NONE; ikesa_flag = TRUE; break; } } /* if there is no IKE SA DELETE PAYLOAD*/ /* That means, there are AH OR ESP*/ if (!ikesa_flag) e.isag_np = ISAKMP_NEXT_v2D; } else { e.isag_np = ISAKMP_NEXT_v2NONE; } e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &md->rbody, &e_pbs)) return STF_INTERNAL_ERROR; /* insert IV */ iv = e_pbs.cur; ivsize = st->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_INTERNAL_ERROR; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; if (md->chain[ISAKMP_NEXT_v2D]) { for (p = md->chain[ISAKMP_NEXT_v2D]; p != NULL; p = p->next) { v2del = &p->payload.v2delete; switch (v2del->isad_protoid) { case PROTO_ISAKMP: /* My understanding is that delete payload for IKE SA * should be the only payload in the informational exchange */ break; case PROTO_IPSEC_AH: case PROTO_IPSEC_ESP: { char spi_buf[1024]; pb_stream del_pbs; 
struct ikev2_delete v2del_tmp; u_int16_t i, j = 0; u_char *spi; for (i = 0; i < v2del->isad_nrspi; i++ ) { spi = p->pbs.cur + (i * v2del-> isad_spisize); DBG(DBG_CONTROLMORE, DBG_log( "received delete request for %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); struct state *dst = find_state_ikev2_child_to_delete( st->st_icookie, st->st_rcookie, v2del->isad_protoid, *( ipsec_spi_t *)spi); if (dst != NULL) { struct ipsec_proto_info *pr = v2del-> isad_protoid == PROTO_IPSEC_AH ? &dst ->st_ah : &dst -> st_esp; DBG( DBG_CONTROLMORE, DBG_log( "our side spi that needs to be sent: %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl( pr -> our_spi))); memcpy( spi_buf + (j * v2del -> isad_spisize), (u_char *)&pr->our_spi, v2del->isad_spisize); j++; } else { DBG( DBG_CONTROLMORE, DBG_log( "received delete request for %s SA(0x%08lx) but local state is not found", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); } } if ( !j ) { DBG(DBG_CONTROLMORE, DBG_log( "This delete payload does not contain a single spi that has any local state, ignoring")); return STF_IGNORE; } else { DBG(DBG_CONTROLMORE, DBG_log( "No. 
of SPIs to be sent %d", j); DBG_dump( " Emit SPIs", spi_buf, j * v2del-> isad_spisize)); } zero(&v2del_tmp); if (p->next != NULL) v2del_tmp.isad_np = ISAKMP_NEXT_v2D; else v2del_tmp.isad_np = ISAKMP_NEXT_v2NONE; v2del_tmp.isad_protoid = v2del->isad_protoid; v2del_tmp.isad_spisize = v2del->isad_spisize; v2del_tmp.isad_nrspi = j; /* Emit delete payload header out*/ if (!out_struct(&v2del_tmp, & ikev2_delete_desc, &e_pbs_cipher, &del_pbs)) { libreswan_log( "error initializing hdr for delete payload"); return STF_INTERNAL_ERROR; } /* Emit values of spi to be sent to the peer*/ if (!out_raw(spi_buf, j * v2del-> isad_spisize, &del_pbs, "local spis")) { libreswan_log( "error sending spi values in delete payload"); return STF_INTERNAL_ERROR; } close_output_pbs(&del_pbs); } break; default: /*Unrecongnized protocol */ return STF_IGNORE; } /* this will break from for loop*/ if (v2del->isad_protoid == PROTO_ISAKMP) break; } } /*If there are no payloads or in other words empty payload in request * that means it is check for liveliness, so send an empty payload message * this will end up sending an empty payload */ ikev2_padup_pre_encrypt(md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { unsigned char *authloc = ikev2_authloc(md, &e_pbs); if (authloc == NULL) return STF_INTERNAL_ERROR; close_output_pbs(&e_pbs); close_output_pbs(&md->rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(md, md->role, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return ret; } /* keep it for a retransmit if necessary */ freeanychunk(st->st_tpacket); clonetochunk(st->st_tpacket, reply_stream.start, pbs_offset( &reply_stream), "reply packet for informational exchange"); send_ike_msg(st, __FUNCTION__); } /* Now carry out the actualy task, we can not carry the actual task since * we need to send informational responde using existig SAs */ { if (md->chain[ISAKMP_NEXT_v2D] && st->st_state != STATE_IKESA_DEL) { for (p = md->chain[ISAKMP_NEXT_v2D]; p != NULL; 
p = p->next) { v2del = &p->payload.v2delete; switch (v2del->isad_protoid) { case PROTO_ISAKMP: { /* My understanding is that delete payload for IKE SA * should be the only payload in the informational * Now delete the IKE SA state and all its child states */ struct state *current_st = st; struct state *next_st = NULL; struct state *first_st = NULL; /* Find the first state in the hash chain*/ while (current_st != (struct state *) NULL) { first_st = current_st; current_st = first_st-> st_hashchain_prev; } current_st = first_st; while (current_st != (struct state *) NULL) { next_st = current_st-> st_hashchain_next; if (current_st-> st_clonedfrom != 0 ) { change_state( current_st, STATE_CHILDSA_DEL); } else { change_state( current_st, STATE_IKESA_DEL); } delete_state(current_st); current_st = next_st; } } break; case PROTO_IPSEC_AH: case PROTO_IPSEC_ESP: { /* pb_stream del_pbs; */ struct ikev2_delete; u_int16_t i; u_char *spi; for (i = 0; i < v2del->isad_nrspi; i++ ) { spi = p->pbs.cur + (i * v2del-> isad_spisize); DBG(DBG_CONTROLMORE, DBG_log( "Now doing actual deletion for request: %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); struct state *dst = find_state_ikev2_child_to_delete( st->st_icookie, st->st_rcookie, v2del->isad_protoid, *( ipsec_spi_t *)spi); if (dst != NULL) { struct ipsec_proto_info *pr = v2del-> isad_protoid == PROTO_IPSEC_AH ? 
&dst ->st_ah : &dst -> st_esp; DBG( DBG_CONTROLMORE, DBG_log( "our side spi that needs to be deleted: %s SA(0x%08lx)", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl( pr -> our_spi))); /* now delete the state*/ change_state( dst, STATE_CHILDSA_DEL); delete_state( dst); } else { DBG( DBG_CONTROLMORE, DBG_log( "received delete request for %s SA(0x%08lx) but local state is not found", enum_show( & protocol_names, v2del -> isad_protoid), ( unsigned long) ntohl(( unsigned long) *( ipsec_spi_t *) spi))); } } } break; default: /*Unrecongnized protocol */ return STF_IGNORE; } /* this will break from for loop*/ if (v2del->isad_protoid == PROTO_ISAKMP) break; } /* for */ } /* if*/ else { /* empty response to our IKESA delete request*/ if ((md->hdr.isa_flags & ISAKMP_FLAGS_R) && st->st_state == STATE_IKESA_DEL) { /* My understanding is that delete payload for IKE SA * should be the only payload in the informational * Now delete the IKE SA state and all its child states */ struct state *current_st = st; struct state *next_st = NULL; struct state *first_st = NULL; /* Find the first state in the hash chain*/ while (current_st != (struct state *) NULL) { first_st = current_st; current_st = first_st-> st_hashchain_prev; } current_st = first_st; while (current_st != (struct state *) NULL) { next_st = current_st-> st_hashchain_next; if (current_st->st_clonedfrom != 0 ) { change_state( current_st, STATE_CHILDSA_DEL); } else { change_state( current_st, STATE_IKESA_DEL); } delete_state(current_st); current_st = next_st; } /* empty response to our empty INFORMATIONAL * We don't send anything back */ } else if ((md->hdr.isa_flags & ISAKMP_FLAGS_R) && st->st_state != STATE_IKESA_DEL) { DBG(DBG_CONTROLMORE, DBG_log( "Received an INFORMATIONAL response, " "updating liveness, no longer pending.")); st->st_last_liveness = now(); st->st_pend_liveness = FALSE; st->st_msgid_lastrecv = md->msgid_received; } } } } return STF_OK; } stf_status 
ikev2_send_informational(struct state *st) { struct state *pst = NULL; if (st->st_clonedfrom != SOS_NOBODY) { pst = state_with_serialno(st->st_clonedfrom); if (!pst) { DBG(DBG_CONTROL, DBG_log( "IKE SA does not exist for this child SA - should not happen")); DBG(DBG_CONTROL, DBG_log("INFORMATIONAL exchange can not be sent")); return STF_IGNORE; } } else { pst = st; } { unsigned char *authstart; unsigned char *encstart; unsigned char *iv; int ivsize; struct msg_digest md; struct ikev2_generic e; enum phase1_role role; pb_stream e_pbs, e_pbs_cipher; pb_stream rbody; pb_stream request; u_char buffer[1024]; md.st = st; md.pst = pst; memset(buffer, 0, sizeof(buffer)); init_pbs(&request, buffer, sizeof(buffer), "informational exchange request packet"); authstart = request.cur; /* HDR out */ { struct isakmp_hdr r_hdr; zero(&r_hdr); r_hdr.isa_version = build_ike_version(); memcpy(r_hdr.isa_rcookie, pst->st_rcookie, COOKIE_SIZE); memcpy(r_hdr.isa_icookie, pst->st_icookie, COOKIE_SIZE); r_hdr.isa_xchg = ISAKMP_v2_INFORMATIONAL; r_hdr.isa_np = ISAKMP_NEXT_v2E; if (pst->st_state == STATE_PARENT_I2 || pst->st_state == STATE_PARENT_I3) { r_hdr.isa_flags |= ISAKMP_FLAGS_I; role = INITIATOR; r_hdr.isa_msgid = htonl(pst->st_msgid_nextuse); } else { role = RESPONDER; r_hdr.isa_msgid = htonl( pst->st_msgid_lastrecv + 1); } if (!out_struct(&r_hdr, &isakmp_hdr_desc, &request, &rbody)) { libreswan_log( "error initializing hdr for informational message"); return STF_FATAL; } } /* HDR done*/ /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2NONE; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &rbody, &e_pbs)) return STF_FATAL; /* IV */ iv = e_pbs.cur; ivsize = pst->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) return STF_FATAL; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; 
e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; /* This is an empty informational exchange (A.K.A liveness check) */ ikev2_padup_pre_encrypt(&md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { stf_status ret; unsigned char *authloc = ikev2_authloc(&md, &e_pbs); if (!authloc) return STF_FATAL; close_output_pbs(&e_pbs); close_output_pbs(&rbody); close_output_pbs(&request); ret = ikev2_encrypt_msg(&md, role, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) return STF_FATAL; } /* keep it for a retransmit if necessary */ freeanychunk(pst->st_tpacket); clonetochunk(pst->st_tpacket, request.start, pbs_offset(&request), "reply packet for informational exchange"); pst->st_pend_liveness = TRUE; /* we should only do this when dpd/liveness is active? */ send_ike_msg(pst, __FUNCTION__); ikev2_update_counters(&md); } return STF_OK; } /* * *************************************************************** * DELETE_OUT ***** *************************************************************** * */ void ikev2_delete_out(struct state *st) { struct state *pst = NULL; if (st->st_clonedfrom != 0) { /*child SA*/ pst = state_with_serialno(st->st_clonedfrom); if (!pst) { DBG(DBG_CONTROL, DBG_log("IKE SA does not exist for this child SA")); DBG(DBG_CONTROL, DBG_log( "INFORMATIONAL exchange can not be sent, deleting state")); goto end; } } else { /* Parent SA*/ pst = st; } { unsigned char *authstart; pb_stream e_pbs, e_pbs_cipher; pb_stream rbody; struct ikev2_generic e; unsigned char *iv; int ivsize; unsigned char *encstart; struct msg_digest md; enum phase1_role role; md.st = st; md.pst = pst; /* beginning of data going out */ authstart = reply_stream.cur; /* make sure HDR is at start of a clean buffer */ zero(reply_buffer); init_pbs(&reply_stream, reply_buffer, sizeof(reply_buffer), "information exchange request packet"); /* HDR out */ { struct isakmp_hdr r_hdr; zero(&r_hdr); /* default to 0 */ /* AAA should we copy from MD? 
*/ r_hdr.isa_version = build_ike_version(); memcpy(r_hdr.isa_rcookie, pst->st_rcookie, COOKIE_SIZE); memcpy(r_hdr.isa_icookie, pst->st_icookie, COOKIE_SIZE); r_hdr.isa_xchg = ISAKMP_v2_INFORMATIONAL; r_hdr.isa_np = ISAKMP_NEXT_v2E; r_hdr.isa_msgid = htonl(pst->st_msgid_nextuse); /*set initiator bit if we are initiator*/ if (pst->st_state == STATE_PARENT_I2 || pst->st_state == STATE_PARENT_I3) { r_hdr.isa_flags |= ISAKMP_FLAGS_I; role = INITIATOR; } else { role = RESPONDER; } /* r_hdr.isa_flags |= ISAKMP_FLAGS_R; */ if (!out_struct(&r_hdr, &isakmp_hdr_desc, &reply_stream, &rbody)) { libreswan_log( "error initializing hdr for informational message"); goto end; } } /*HDR Done*/ /* insert an Encryption payload header */ e.isag_np = ISAKMP_NEXT_v2D; e.isag_critical = ISAKMP_PAYLOAD_NONCRITICAL; if (!out_struct(&e, &ikev2_e_desc, &rbody, &e_pbs)) goto end; /* insert IV */ iv = e_pbs.cur; ivsize = pst->st_oakley.encrypter->iv_size; if (!out_zero(ivsize, &e_pbs, "iv")) goto end; get_rnd_bytes(iv, ivsize); /* note where cleartext starts */ init_pbs(&e_pbs_cipher, e_pbs.cur, e_pbs.roof - e_pbs.cur, "cleartext"); e_pbs_cipher.container = &e_pbs; e_pbs_cipher.desc = NULL; e_pbs_cipher.cur = e_pbs.cur; encstart = e_pbs_cipher.cur; { pb_stream del_pbs; struct ikev2_delete v2del_tmp; /* * u_int16_t i, j=0; * u_char *spi; * char spi_buf[1024]; */ zero(&v2del_tmp); v2del_tmp.isad_np = ISAKMP_NEXT_v2NONE; if (st->st_clonedfrom != 0 ) { v2del_tmp.isad_protoid = PROTO_IPSEC_ESP; v2del_tmp.isad_spisize = sizeof(ipsec_spi_t); v2del_tmp.isad_nrspi = 1; } else { v2del_tmp.isad_protoid = PROTO_ISAKMP; v2del_tmp.isad_spisize = 0; v2del_tmp.isad_nrspi = 0; } /* Emit delete payload header out*/ if (!out_struct(&v2del_tmp, &ikev2_delete_desc, &e_pbs_cipher, &del_pbs)) { libreswan_log( "error initializing hdr for delete payload"); goto end; } /* Emit values of spi to be sent to the peer*/ if (st->st_clonedfrom != 0) { if (!out_raw( (u_char *)&st->st_esp.our_spi, sizeof(ipsec_spi_t), &del_pbs, 
"local spis")) { libreswan_log( "error sending spi values in delete payload"); goto end; } } close_output_pbs(&del_pbs); } ikev2_padup_pre_encrypt(&md, &e_pbs_cipher); close_output_pbs(&e_pbs_cipher); { stf_status ret; unsigned char *authloc = ikev2_authloc(&md, &e_pbs); if (authloc == NULL) goto end; close_output_pbs(&e_pbs); close_output_pbs(&rbody); close_output_pbs(&reply_stream); ret = ikev2_encrypt_msg(&md, role, authstart, iv, encstart, authloc, &e_pbs, &e_pbs_cipher); if (ret != STF_OK) goto end; } /* keep it for a retransmit if necessary */ freeanychunk(pst->st_tpacket); clonetochunk(pst->st_tpacket, reply_stream.start, pbs_offset(&reply_stream), "request packet for informational exchange"); send_ike_msg(pst, __FUNCTION__); /* update state */ ikev2_update_counters(&md); } /* If everything is fine, and we sent packet, goto real_end*/ goto real_end; end: /* If some error occurs above that prevents us sending a request packet*/ /* delete the states right now*/ if (st->st_clonedfrom != SOS_NOBODY) { change_state(st, STATE_CHILDSA_DEL); delete_state(st); } else { struct state *current_st = pst; struct state *next_st = NULL; struct state *first_st = NULL; /* Find the first state in the hash chain*/ while (current_st != (struct state *) NULL) { first_st = current_st; current_st = first_st->st_hashchain_prev; } current_st = first_st; while (current_st != (struct state *) NULL) { next_st = current_st->st_hashchain_next; if (current_st->st_clonedfrom != 0 ) change_state(current_st, STATE_CHILDSA_DEL); else change_state(current_st, STATE_IKESA_DEL); delete_state(current_st); current_st = next_st; } } real_end:; } /* * Determine the IKE version we will use for the IKE packet * Normally, this is "2.0", but in the future we might need to * change that. Version used is the minimum 2.x version both * sides support. 
So if we support 2.1, and they support 2.0, * we should sent 2.0 (not implemented until we hit 2.1 ourselves) * We also have some impair functions that modify the major/minor * version on purpose - for testing * * rcv_version: the received IKE version, 0 if we don't know * * top 4 bits are major version, lower 4 bits are minor version */ static int build_ike_version() { return ((IKEv2_MAJOR_VERSION + (DBGP(IMPAIR_MAJOR_VERSION_BUMP) ? 1 : 0)) << ISA_MAJ_SHIFT) | (IKEv2_MINOR_VERSION + (DBGP(IMPAIR_MINOR_VERSION_BUMP) ? 1 : 0)); }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5849_0
crossvul-cpp_data_good_2435_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/module.h" #include "magick/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(Image *, DDSInfo *, ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 
4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } 
}, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { 
{ { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { 
{ 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 
31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 
0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } 
}, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } 
}, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } 
}, { { { 54, 0, 1 }, { 58, 46, 0 } } },
  { { { 54, 0, 0 }, { 50, 62, 0 } } },
  { { { 54, 0, 1 }, { 50, 63, 0 } } },
  { { { 54, 0, 2 }, { 51, 62, 0 } } },
  { { { 55, 0, 1 }, { 59, 47, 0 } } },
  { { { 55, 0, 0 }, { 51, 63, 0 } } },
  { { { 55, 0, 1 }, { 52, 62, 0 } } },
  { { { 55, 0, 2 }, { 52, 63, 0 } } },
  { { { 56, 0, 1 }, { 61, 46, 0 } } },
  { { { 56, 0, 0 }, { 53, 62, 0 } } },
  { { { 56, 0, 1 }, { 53, 63, 0 } } },
  { { { 56, 0, 2 }, { 54, 62, 0 } } },
  { { { 57, 0, 1 }, { 62, 47, 0 } } },
  { { { 57, 0, 0 }, { 54, 63, 0 } } },
  { { { 57, 0, 1 }, { 55, 62, 0 } } },
  { { { 57, 0, 2 }, { 55, 63, 0 } } },
  { { { 58, 0, 1 }, { 56, 62, 1 } } },
  { { { 58, 0, 0 }, { 56, 62, 0 } } },
  { { { 58, 0, 1 }, { 56, 63, 0 } } },
  { { { 58, 0, 2 }, { 57, 62, 0 } } },
  { { { 59, 0, 1 }, { 57, 63, 1 } } },
  { { { 59, 0, 0 }, { 57, 63, 0 } } },
  { { { 59, 0, 1 }, { 58, 62, 0 } } },
  { { { 59, 0, 2 }, { 58, 63, 0 } } },
  { { { 60, 0, 1 }, { 59, 62, 1 } } },
  { { { 60, 0, 0 }, { 59, 62, 0 } } },
  { { { 60, 0, 1 }, { 59, 63, 0 } } },
  { { { 60, 0, 2 }, { 60, 62, 0 } } },
  { { { 61, 0, 1 }, { 60, 63, 1 } } },
  { { { 61, 0, 0 }, { 60, 63, 0 } } },
  { { { 61, 0, 1 }, { 61, 62, 0 } } },
  { { { 61, 0, 2 }, { 61, 63, 0 } } },
  { { { 62, 0, 1 }, { 62, 62, 1 } } },
  { { { 62, 0, 0 }, { 62, 62, 0 } } },
  { { { 62, 0, 1 }, { 62, 63, 0 } } },
  { { { 62, 0, 2 }, { 63, 62, 0 } } },
  { { { 63, 0, 1 }, { 63, 63, 1 } } },
  { { { 63, 0, 0 }, { 63, 63, 0 } } }
};

/*
  Per-channel single-colour lookup dispatch: index 0 is the red channel
  (5 bits), index 1 green (6 bits), index 2 blue (5 bits), hence the
  5_4 / 6_4 / 5_4 ordering.  Used by ComputeEndPoints().
*/
static const DDSSingleColourLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};

/*
  Macros
*/
/* Extract the raw 5/6/5-bit fields from a packed RGB565 word. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)

/*
  Expand a 5- or 6-bit field to 8 bits by replicating the top bits into
  the low bits (standard RGB565 -> RGB888 expansion).
*/
#define C565_red(x)   ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x)  ( (C565_b(x) << 3 | C565_b(x) >> 2))

/* Halve a mipmap dimension, never dropping below 1. */
#define DIV2(x) ((x) > 1 ?
((x) >> 1) : 1)

/*
  Clamp [min, max] so it is ordered and spans at least `steps' levels,
  staying within the 0..255 byte range.
*/
#define FixRange(min, max, steps) \
if (min > max) \
  min = max; \
if (max - min < steps) \
  max = MagickMin(min + steps, 255); \
if (max - min < steps) \
  min = MagickMax(min - steps, 0)

/* 3-component dot product of two vector structs (x, y, z members). */
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)

/* Broadcast a scalar into all four (VectorInit) or three (VectorInit3)
   components of a vector. */
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value

/* True when the pixel-format bit masks match the given R/G/B/A masks. */
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)

/*
  Forward declarations
*/
static MagickBooleanType
  ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
    DDSVector4 *,DDSVector4 *,unsigned char *,size_t),
  ReadDDSInfo(Image *,DDSInfo *),
  ReadDXT1(Image *,DDSInfo *,ExceptionInfo *),
  ReadDXT3(Image *,DDSInfo *,ExceptionInfo *),
  ReadDXT5(Image *,DDSInfo *,ExceptionInfo *),
  ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *),
  ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *),
  SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  WriteDDSImage(const ImageInfo *,Image *),
  WriteMipmaps(Image *,const size_t,const size_t,const size_t,
    const MagickBooleanType,const MagickBooleanType,ExceptionInfo *);

static void
  RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
  WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
  WriteFourCC(Image *,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteIndices(Image *,const DDSVector3,const DDSVector3,
    unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);

/* destination = left + right (componentwise, 4 components). */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->x = left.x + right.x;
  destination->y = left.y + right.y;
  destination->z = left.z + right.z;
  destination->w = left.w + right.w;
}

/* Clamp every component of a 4-vector to the [0, 1] interval. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamp every component of a 3-vector to the [0, 1] interval. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copy the xyz of a 4-vector into a 3-vector (w is dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copy all four components of a 4-vector. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* destination = c - a*b (componentwise fused negative multiply-subtract). */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
  destination->y = c.y - (a.y * b.y);
  destination->z = c.z - (a.z * b.z);
  destination->w = c.w - (a.w * b.w);
}

/* destination = left * right (componentwise, 4 components). */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
  destination->w = left.w * right.w;
}

/* destination = left * right (componentwise, 3 components). */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
}

/* destination = a*b + c (componentwise multiply-add, 4 components). */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
  destination->w = (a.w * b.w) + c.w;
}

/* destination = a*b + c (componentwise multiply-add, 3 components). */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
}

/* destination = 1 / value (componentwise; no zero guard — callers must
   not pass zero components). */
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->x = 1.0f / value.x;
  destination->y = 1.0f / value.y;
  destination->z = 1.0f / value.z;
  destination->w = 1.0f / value.w;
}

/* destination = left - right (componentwise, 4 components). */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
  destination->w = left.w - right.w;
}

/* destination = left - right (componentwise, 3 components). */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
}

/* Truncate each component toward zero (round-toward-zero, 4 components). */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}

/* Truncate each component toward zero (round-toward-zero, 3 components). */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ?
floor(value->z) : ceil(value->z);
}

/*
  Expand the two RGB565 endpoint colours of a DXT block into the four
  palette entries c->r/g/b/a[0..3].  When ignoreAlpha is set, or c0 > c1
  (the opaque DXT1 ordering), the two intermediate colours are the 1/3
  and 2/3 interpolants; otherwise (punch-through DXT1) entry 2 is the
  midpoint and entry 3 is transparent black (a[3] = 255 marks it).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/*
  Quantize the 16 alpha values of a 4x4 block against the 8-entry DXT5
  alpha code table built from [min, max] with `steps' interpolants
  (entries 6/7 are the fixed 0 and 255 codes).  Writes the chosen code
  for each texel to indices[] and returns the summed squared error.
  Alpha values of -1 mark texels outside the image and map to code 0.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  register ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;

  /* Interpolated codes; for steps == 7 this overwrites slots 6/7 with
     interpolants, matching the 8-alpha DXT5 mode. */
  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }

    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      /* The unsigned subtraction may wrap when value < codes[j], but
         squaring modulo 2^N still yields the correct |diff|^2 because
         |diff| < 256, so the comparison below is exact. */
      dist = value - (size_t)codes[j];
      dist *= dist;

      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }

    indices[i] = (unsigned char)index;
    error += least;
  }

  return error;
}

/*
  Cluster-fit DXT colour compression (derived from libsquish): search
  all orderings of the block's colours along the principal axis for the
  endpoint pair minimizing the metric-weighted least-squares error.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],  /* 8 iterations x 16 indices; assumes count <= 16 */
    unordered[16];

  /* Constant weights for the 1/3 and 2/3 interpolated palette entries
     (the .w slots carry the squared weights used in the error terms). */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);

  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);

  /* RGB565 quantization grid: 5/6/5 bits per channel. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;

  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;

  /* Initial ordering along the principal axis. */
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);

  /* Up to 8 refinement iterations: re-sort along the current endpoint
     axis and redo the exhaustive (i,j,k) cluster split search. */
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      /* part0/1/2/3 are the weighted sums of the four palette clusters
         delimited by the split points i <= j <= k. */
      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);

      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }
        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 = total - part0 - part1 - part2. */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);

          /* Least-squares solve for the two endpoints a and b. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);

          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);

          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);

          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);

          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);

          /* Snap both endpoints onto the RGB565 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);

          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);

          /* Metric-weighted squared error of this split. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);

          error = e2.x + e2.y + e2.z;

          /* Double-checked pattern: cheap unlocked test, then re-test
             inside the critical section before recording the best. */
          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }

          if (k == count)
            break;

          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }

        if (j == count)
          break;

        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }

    /* Stop when the last iteration produced no improvement. */
    if (bestIteration != iterationIndex)
      break;

    iterationIndex++;
    if (iterationIndex == 8)
      break;

    /* Re-order along the new endpoint axis; bail if the ordering
       repeats one already tried. */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
        iterationIndex) == MagickFalse)
      break;
  }

  /* Map each sorted position to its palette entry: 0 = start cluster,
     2 = 2/3 interpolant, 3 = 1/3 interpolant, 1 = end cluster. */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for
(i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  /* Translate sorted-order indices back to original texel positions. */
  RemapIndices(map,unordered,indices);
}

/*
  Range-fit DXT colour compression: take the extreme points along the
  principal axis as endpoints, snap them to RGB565, and assign every
  colour to the nearest of the four palette entries under the given
  perceptual metric.  Cheaper but less accurate than cluster fit.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  /* RGB565 quantization grid. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  /* NOTE(review): when count == 0, *start/*end keep whatever the caller
     passed in — presumably callers always supply count >= 1; confirm. */
  if (count > 0)
    {
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      /* Endpoints are the min/max projections on the principal axis. */
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
        {
          VectorCopy43(points[i],start);
          min = val;
        }
        else if (val > max)
        {
          VectorCopy43(points[i],end);
          max = val;
        }
      }
    }

  /* Snap both endpoints onto the RGB565 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* Four-entry palette: endpoints plus the 1/3 and 2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  /* Nearest-palette-entry assignment under the perceptual metric. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;
      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }
    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}

/*
  Single-colour fit: look the block's (already quantized) R, G and B
  bytes up in the per-channel tables and pick the endpoint pair (direct
  or interpolated, i = 0/1) with the least summed squared error.
  *index becomes 0 (use start) or 2 (use the 2/3 interpolant).
*/
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;  /* first candidate always wins below */

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    if (error > maxError)
      continue;

    /* Channel order matches DDS_LOOKUP: R (5-bit), G (6-bit), B (5-bit). */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}

/*
  Dominant eigenvector of the symmetric 3x3 covariance matrix (stored
  as its 6 unique elements) via 8 rounds of power iteration, normalized
  by the largest component each round.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  register ssize_t
    i;

  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Normalize by the largest component to avoid overflow. */
    a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}

/*
  Weighted 3x3 covariance of the colour points (weights in .w), about
  the weighted centroid; only the 6 unique elements are stored in
  covariance[0..5] (row-major upper triangle).
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  total = 0.0f;
  VectorInit3(centroid,0.0f);

  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }

  /* Guard against a zero total weight (all-transparent block). */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }

  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;

  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;

    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;

    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}

/*
  Sort the block's colours by their projection on `axis' (insertion
  sort; count <= 16) and store the permutation in order[iteration*16..].
  Returns MagickFalse when the ordering duplicates one from an earlier
  iteration (so cluster fit can stop).  Also rebuilds pointsWeights[]
  in sorted order (premultiplied by weight) and their sum xSumwSum.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  register ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);

  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Insertion sort of projections, carrying the index permutation. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Reject orderings already tried in earlier iterations. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s D D S                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsDDS() returns MagickTrue if the image format type, identified by the
%  magick
string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType status, cubemap = MagickFalse, volume = MagickFalse, matte; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; size_t n, num_images; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. 
*/ if (ReadDDSInfo(image, &dds_info) != MagickTrue) { ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { matte = MagickTrue; decoder = ReadUncompressedRGBA; } else { matte = MagickTrue; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { matte = MagickFalse; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { matte = MagickFalse; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { matte = MagickTrue; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { matte = MagickTrue; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... 
thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; for (n = 0; n < num_images; n++) { if (n != 0) { if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); /* Start a new image */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->matte = matte; image->compression = compression; image->columns = dds_info.width; image->rows = dds_info.height; image->storage_class = DirectClass; image->endian = LSBEndian; image->depth = 8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((decoder)(image, &dds_info, exception) != MagickTrue) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } } if (EOFBlob(image) != MagickFalse) ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); } static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t hdr_size, required; /* Seek to start of header */ (void) SeekBlob(image, 4, SEEK_SET); /* Check header field */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 124) return 
MagickFalse; /* Fill in DDS info struct */ dds_info->flags = ReadBlobLSBLong(image); /* Check required flags */ required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); if ((dds_info->flags & required) != required) return MagickFalse; dds_info->height = ReadBlobLSBLong(image); dds_info->width = ReadBlobLSBLong(image); dds_info->pitchOrLinearSize = ReadBlobLSBLong(image); dds_info->depth = ReadBlobLSBLong(image); dds_info->mipmapcount = ReadBlobLSBLong(image); (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */ /* Read pixel format structure */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 32) return MagickFalse; dds_info->pixelformat.flags = ReadBlobLSBLong(image); dds_info->pixelformat.fourcc = ReadBlobLSBLong(image); dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image); dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image); dds_info->ddscaps1 = ReadBlobLSBLong(image); dds_info->ddscaps2 = ReadBlobLSBLong(image); (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */ return MagickTrue; } static MagickBooleanType ReadDXT1(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; PixelPacket *q; register ssize_t i, x; size_t bits; ssize_t j, y; unsigned char code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickFalse); /* Write the pixels */ for (j = 0; j < 4; 
j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code])); if (colors.a[code] && image->matte == MagickFalse) /* Correct matte */ image->matte = MagickTrue; q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; PixelPacket *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> 
(4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; MagickSizeType alpha_bits; PixelPacket *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value */ alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); 
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t x, y; unsigned short color; if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image))); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(q,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else { SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); if (dds_info->pixelformat.rgb_bitcount == 32) (void) ReadBlobByte(image); } SetPixelAlpha(q,QuantumRange); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; 
if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleMatteType); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else if (alphaBits == 2) { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (color >> 8))); SetPixelGray(q,ScaleCharToQuantum((unsigned char)color)); } else { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255))); } } else { SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); } q++; } if 
(SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = SetMagickInfo("DDS"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT1"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT5"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); 
} static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { register ssize_t i; MagickOffsetType offset; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size; (void) SeekBlob(image, offset, SEEK_CUR); w = DIV2(w); h = DIV2(h); } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { MagickOffsetType offset; register ssize_t i; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) w * h * pixel_size; (void) SeekBlob(image, offset, SEEK_CUR); w = DIV2(w); h = DIV2(h); } } 
return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported formats. % % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteCompressed(Image *image, const size_t count, DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if (clusterFit == MagickFalse || count == 0) 
CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (!image->matte) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (option != (char *) NULL && LocaleCompare(option,"true") == 0) { 
clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (option != (char *) NULL && LocaleCompare(option,"true") == 0) weightByAlpha=MagickTrue; } } } maxMipmaps=SIZE_MAX; mipmaps=0; if ((image->columns & (image->columns - 1)) == 0 && (image->rows & (image->rows - 1)) == 0) { option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while (columns != 1 && rows != 1 && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, &image->exception); if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps, clusterFit,weightByAlpha,&image->exception) == MagickFalse) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MaxTextExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT | DDSD_LINEARSIZE); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->matte) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image, (unsigned int) (MagickMax(1,(image->columns+3)/4) * 8)); else (void) WriteBlobLSBLong(image, (unsigned int) 
(MagickMax(1,(image->columns+3)/4) * 16)); (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) ResetMagickMemory(software,0,sizeof(software)); (void) strcpy(software,"IMAGEMAGICK"); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) // bitcount / masks (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->matte) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) // ddscaps2 + reserved region (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t i, y, bx, by; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const PixelPacket *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if 
(compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(p)); else alpha = 255; alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f; point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f; p++; match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = 
ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char* indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { Image* resize_image; register ssize_t i; size_t columns, rows; columns = image->columns; rows = image->rows; for (i=0; i< (ssize_t) mipmaps; i++) { resize_image = ResizeImage(image,columns/2,rows/2,TriangleFilter,1.0, exception); if (resize_image == (Image *) NULL) return(MagickFalse); DestroyBlob(resize_image); resize_image->blob=ReferenceBlob(image->blob); WriteImageData(resize_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); resize_image=DestroyImage(resize_image); columns = DIV2(columns); rows = DIV2(rows); } return(MagickTrue); } static void WriteSingleColorFit(Image *image, const DDSVector4* points, const ssize_t* map) { DDSVector3 start, end; register ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); 
index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p))); if (image->matte) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p))); p++; } } }
./CrossVul/dataset_final_sorted/CWE-20/c/good_2435_0
crossvul-cpp_data_bad_3515_0
/* * AppArmor security module * * This file contains AppArmor LSM hooks. * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include <linux/security.h> #include <linux/moduleparam.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/ptrace.h> #include <linux/ctype.h> #include <linux/sysctl.h> #include <linux/audit.h> #include <linux/user_namespace.h> #include <net/sock.h> #include "include/apparmor.h" #include "include/apparmorfs.h" #include "include/audit.h" #include "include/capability.h" #include "include/context.h" #include "include/file.h" #include "include/ipc.h" #include "include/path.h" #include "include/policy.h" #include "include/procattr.h" /* Flag indicating whether initialization completed */ int apparmor_initialized __initdata; /* * LSM hook functions */ /* * free the associated aa_task_cxt and put its profiles */ static void apparmor_cred_free(struct cred *cred) { aa_free_task_context(cred->security); cred->security = NULL; } /* * allocate the apparmor part of blank credentials */ static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp) { /* freed by apparmor_cred_free */ struct aa_task_cxt *cxt = aa_alloc_task_context(gfp); if (!cxt) return -ENOMEM; cred->security = cxt; return 0; } /* * prepare new aa_task_cxt for modification by prepare_cred block */ static int apparmor_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { /* freed by apparmor_cred_free */ struct aa_task_cxt *cxt = aa_alloc_task_context(gfp); if (!cxt) return -ENOMEM; aa_dup_task_context(cxt, old->security); new->security = cxt; return 0; } /* * transfer the apparmor data to a blank set of creds */ static void apparmor_cred_transfer(struct cred *new, const struct cred 
*old) { const struct aa_task_cxt *old_cxt = old->security; struct aa_task_cxt *new_cxt = new->security; aa_dup_task_context(new_cxt, old_cxt); } static int apparmor_ptrace_access_check(struct task_struct *child, unsigned int mode) { int error = cap_ptrace_access_check(child, mode); if (error) return error; return aa_ptrace(current, child, mode); } static int apparmor_ptrace_traceme(struct task_struct *parent) { int error = cap_ptrace_traceme(parent); if (error) return error; return aa_ptrace(parent, current, PTRACE_MODE_ATTACH); } /* Derived from security/commoncap.c:cap_capget */ static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { struct aa_profile *profile; const struct cred *cred; rcu_read_lock(); cred = __task_cred(target); profile = aa_cred_profile(cred); *effective = cred->cap_effective; *inheritable = cred->cap_inheritable; *permitted = cred->cap_permitted; if (!unconfined(profile)) { *effective = cap_intersect(*effective, profile->caps.allow); *permitted = cap_intersect(*permitted, profile->caps.allow); } rcu_read_unlock(); return 0; } static int apparmor_capable(struct task_struct *task, const struct cred *cred, struct user_namespace *ns, int cap, int audit) { struct aa_profile *profile; /* cap_capable returns 0 on success, else -EPERM */ int error = cap_capable(task, cred, ns, cap, audit); if (!error) { profile = aa_cred_profile(cred); if (!unconfined(profile)) error = aa_capable(task, profile, cap, audit); } return error; } /** * common_perm - basic common permission check wrapper fn for paths * @op: operation being checked * @path: path to check permission of (NOT NULL) * @mask: requested permissions mask * @cond: conditional info for the permission request (NOT NULL) * * Returns: %0 else error code if error or permission denied */ static int common_perm(int op, struct path *path, u32 mask, struct path_cond *cond) { struct aa_profile *profile; int error = 0; profile = 
__aa_current_profile(); if (!unconfined(profile)) error = aa_path_perm(op, profile, path, 0, mask, cond); return error; } /** * common_perm_dir_dentry - common permission wrapper when path is dir, dentry * @op: operation being checked * @dir: directory of the dentry (NOT NULL) * @dentry: dentry to check (NOT NULL) * @mask: requested permissions mask * @cond: conditional info for the permission request (NOT NULL) * * Returns: %0 else error code if error or permission denied */ static int common_perm_dir_dentry(int op, struct path *dir, struct dentry *dentry, u32 mask, struct path_cond *cond) { struct path path = { dir->mnt, dentry }; return common_perm(op, &path, mask, cond); } /** * common_perm_mnt_dentry - common permission wrapper when mnt, dentry * @op: operation being checked * @mnt: mount point of dentry (NOT NULL) * @dentry: dentry to check (NOT NULL) * @mask: requested permissions mask * * Returns: %0 else error code if error or permission denied */ static int common_perm_mnt_dentry(int op, struct vfsmount *mnt, struct dentry *dentry, u32 mask) { struct path path = { mnt, dentry }; struct path_cond cond = { dentry->d_inode->i_uid, dentry->d_inode->i_mode }; return common_perm(op, &path, mask, &cond); } /** * common_perm_rm - common permission wrapper for operations doing rm * @op: operation being checked * @dir: directory that the dentry is in (NOT NULL) * @dentry: dentry being rm'd (NOT NULL) * @mask: requested permission mask * * Returns: %0 else error code if error or permission denied */ static int common_perm_rm(int op, struct path *dir, struct dentry *dentry, u32 mask) { struct inode *inode = dentry->d_inode; struct path_cond cond = { }; if (!inode || !dir->mnt || !mediated_filesystem(inode)) return 0; cond.uid = inode->i_uid; cond.mode = inode->i_mode; return common_perm_dir_dentry(op, dir, dentry, mask, &cond); } /** * common_perm_create - common permission wrapper for operations doing create * @op: operation being checked * @dir: directory that 
dentry will be created in (NOT NULL) * @dentry: dentry to create (NOT NULL) * @mask: request permission mask * @mode: created file mode * * Returns: %0 else error code if error or permission denied */ static int common_perm_create(int op, struct path *dir, struct dentry *dentry, u32 mask, umode_t mode) { struct path_cond cond = { current_fsuid(), mode }; if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode)) return 0; return common_perm_dir_dentry(op, dir, dentry, mask, &cond); } static int apparmor_path_unlink(struct path *dir, struct dentry *dentry) { return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE); } static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry, int mode) { return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE, S_IFDIR); } static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry) { return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE); } static int apparmor_path_mknod(struct path *dir, struct dentry *dentry, int mode, unsigned int dev) { return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode); } static int apparmor_path_truncate(struct path *path) { struct path_cond cond = { path->dentry->d_inode->i_uid, path->dentry->d_inode->i_mode }; if (!path->mnt || !mediated_filesystem(path->dentry->d_inode)) return 0; return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE, &cond); } static int apparmor_path_symlink(struct path *dir, struct dentry *dentry, const char *old_name) { return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE, S_IFLNK); } static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry) { struct aa_profile *profile; int error = 0; if (!mediated_filesystem(old_dentry->d_inode)) return 0; profile = aa_current_profile(); if (!unconfined(profile)) error = aa_path_link(profile, old_dentry, new_dir, new_dentry); return error; } static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry, 
struct path *new_dir, struct dentry *new_dentry) { struct aa_profile *profile; int error = 0; if (!mediated_filesystem(old_dentry->d_inode)) return 0; profile = aa_current_profile(); if (!unconfined(profile)) { struct path old_path = { old_dir->mnt, old_dentry }; struct path new_path = { new_dir->mnt, new_dentry }; struct path_cond cond = { old_dentry->d_inode->i_uid, old_dentry->d_inode->i_mode }; error = aa_path_perm(OP_RENAME_SRC, profile, &old_path, 0, MAY_READ | AA_MAY_META_READ | MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_DELETE, &cond); if (!error) error = aa_path_perm(OP_RENAME_DEST, profile, &new_path, 0, MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_CREATE, &cond); } return error; } static int apparmor_path_chmod(struct dentry *dentry, struct vfsmount *mnt, mode_t mode) { if (!mediated_filesystem(dentry->d_inode)) return 0; return common_perm_mnt_dentry(OP_CHMOD, mnt, dentry, AA_MAY_CHMOD); } static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid) { struct path_cond cond = { path->dentry->d_inode->i_uid, path->dentry->d_inode->i_mode }; if (!mediated_filesystem(path->dentry->d_inode)) return 0; return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond); } static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { if (!mediated_filesystem(dentry->d_inode)) return 0; return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry, AA_MAY_META_READ); } static int apparmor_dentry_open(struct file *file, const struct cred *cred) { struct aa_file_cxt *fcxt = file->f_security; struct aa_profile *profile; int error = 0; if (!mediated_filesystem(file->f_path.dentry->d_inode)) return 0; /* If in exec, permission is handled by bprm hooks. * Cache permissions granted by the previous exec check, with * implicit read and executable mmap which are required to * actually execute the image. 
*/ if (current->in_execve) { fcxt->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP; return 0; } profile = aa_cred_profile(cred); if (!unconfined(profile)) { struct inode *inode = file->f_path.dentry->d_inode; struct path_cond cond = { inode->i_uid, inode->i_mode }; error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0, aa_map_file_to_perms(file), &cond); /* todo cache full allowed permissions set and state */ fcxt->allow = aa_map_file_to_perms(file); } return error; } static int apparmor_file_alloc_security(struct file *file) { /* freed by apparmor_file_free_security */ file->f_security = aa_alloc_file_context(GFP_KERNEL); if (!file->f_security) return -ENOMEM; return 0; } static void apparmor_file_free_security(struct file *file) { struct aa_file_cxt *cxt = file->f_security; aa_free_file_context(cxt); } static int common_file_perm(int op, struct file *file, u32 mask) { struct aa_file_cxt *fcxt = file->f_security; struct aa_profile *profile, *fprofile = aa_cred_profile(file->f_cred); int error = 0; BUG_ON(!fprofile); if (!file->f_path.mnt || !mediated_filesystem(file->f_path.dentry->d_inode)) return 0; profile = __aa_current_profile(); /* revalidate access, if task is unconfined, or the cached cred * doesn't match or if the request is for more permissions than * was granted. 
* * Note: the test for !unconfined(fprofile) is to handle file * delegation from unconfined tasks */ if (!unconfined(profile) && !unconfined(fprofile) && ((fprofile != profile) || (mask & ~fcxt->allow))) error = aa_file_perm(op, profile, file, mask); return error; } static int apparmor_file_permission(struct file *file, int mask) { return common_file_perm(OP_FPERM, file, mask); } static int apparmor_file_lock(struct file *file, unsigned int cmd) { u32 mask = AA_MAY_LOCK; if (cmd == F_WRLCK) mask |= MAY_WRITE; return common_file_perm(OP_FLOCK, file, mask); } static int common_mmap(int op, struct file *file, unsigned long prot, unsigned long flags) { struct dentry *dentry; int mask = 0; if (!file || !file->f_security) return 0; if (prot & PROT_READ) mask |= MAY_READ; /* * Private mappings don't require write perms since they don't * write back to the files */ if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE)) mask |= MAY_WRITE; if (prot & PROT_EXEC) mask |= AA_EXEC_MMAP; dentry = file->f_path.dentry; return common_file_perm(op, file, mask); } static int apparmor_file_mmap(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags, unsigned long addr, unsigned long addr_only) { int rc = 0; /* do DAC check */ rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); if (rc || addr_only) return rc; return common_mmap(OP_FMMAP, file, prot, flags); } static int apparmor_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { return common_mmap(OP_FMPROT, vma->vm_file, prot, !(vma->vm_flags & VM_SHARED) ? 
MAP_PRIVATE : 0); } static int apparmor_getprocattr(struct task_struct *task, char *name, char **value) { int error = -ENOENT; struct aa_profile *profile; /* released below */ const struct cred *cred = get_task_cred(task); struct aa_task_cxt *cxt = cred->security; profile = aa_cred_profile(cred); if (strcmp(name, "current") == 0) error = aa_getprocattr(aa_newest_version(cxt->profile), value); else if (strcmp(name, "prev") == 0 && cxt->previous) error = aa_getprocattr(aa_newest_version(cxt->previous), value); else if (strcmp(name, "exec") == 0 && cxt->onexec) error = aa_getprocattr(aa_newest_version(cxt->onexec), value); else error = -EINVAL; put_cred(cred); return error; } static int apparmor_setprocattr(struct task_struct *task, char *name, void *value, size_t size) { char *command, *args = value; size_t arg_size; int error; if (size == 0) return -EINVAL; /* args points to a PAGE_SIZE buffer, AppArmor requires that * the buffer must be null terminated or have size <= PAGE_SIZE -1 * so that AppArmor can null terminate them */ if (args[size - 1] != '\0') { if (size == PAGE_SIZE) return -EINVAL; args[size] = '\0'; } /* task can only write its own attributes */ if (current != task) return -EACCES; args = value; args = strim(args); command = strsep(&args, " "); if (!args) return -EINVAL; args = skip_spaces(args); if (!*args) return -EINVAL; arg_size = size - (args - (char *) value); if (strcmp(name, "current") == 0) { if (strcmp(command, "changehat") == 0) { error = aa_setprocattr_changehat(args, arg_size, !AA_DO_TEST); } else if (strcmp(command, "permhat") == 0) { error = aa_setprocattr_changehat(args, arg_size, AA_DO_TEST); } else if (strcmp(command, "changeprofile") == 0) { error = aa_setprocattr_changeprofile(args, !AA_ONEXEC, !AA_DO_TEST); } else if (strcmp(command, "permprofile") == 0) { error = aa_setprocattr_changeprofile(args, !AA_ONEXEC, AA_DO_TEST); } else if (strcmp(command, "permipc") == 0) { error = aa_setprocattr_permipc(args); } else { struct 
common_audit_data sa; COMMON_AUDIT_DATA_INIT(&sa, NONE); sa.aad.op = OP_SETPROCATTR; sa.aad.info = name; sa.aad.error = -EINVAL; return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL, &sa, NULL); } } else if (strcmp(name, "exec") == 0) { error = aa_setprocattr_changeprofile(args, AA_ONEXEC, !AA_DO_TEST); } else { /* only support the "current" and "exec" process attributes */ return -EINVAL; } if (!error) error = size; return error; } static int apparmor_task_setrlimit(struct task_struct *task, unsigned int resource, struct rlimit *new_rlim) { struct aa_profile *profile = aa_current_profile(); int error = 0; if (!unconfined(profile)) error = aa_task_setrlimit(profile, task, resource, new_rlim); return error; } static struct security_operations apparmor_ops = { .name = "apparmor", .ptrace_access_check = apparmor_ptrace_access_check, .ptrace_traceme = apparmor_ptrace_traceme, .capget = apparmor_capget, .capable = apparmor_capable, .path_link = apparmor_path_link, .path_unlink = apparmor_path_unlink, .path_symlink = apparmor_path_symlink, .path_mkdir = apparmor_path_mkdir, .path_rmdir = apparmor_path_rmdir, .path_mknod = apparmor_path_mknod, .path_rename = apparmor_path_rename, .path_chmod = apparmor_path_chmod, .path_chown = apparmor_path_chown, .path_truncate = apparmor_path_truncate, .dentry_open = apparmor_dentry_open, .inode_getattr = apparmor_inode_getattr, .file_permission = apparmor_file_permission, .file_alloc_security = apparmor_file_alloc_security, .file_free_security = apparmor_file_free_security, .file_mmap = apparmor_file_mmap, .file_mprotect = apparmor_file_mprotect, .file_lock = apparmor_file_lock, .getprocattr = apparmor_getprocattr, .setprocattr = apparmor_setprocattr, .cred_alloc_blank = apparmor_cred_alloc_blank, .cred_free = apparmor_cred_free, .cred_prepare = apparmor_cred_prepare, .cred_transfer = apparmor_cred_transfer, .bprm_set_creds = apparmor_bprm_set_creds, .bprm_committing_creds = apparmor_bprm_committing_creds, .bprm_committed_creds = 
apparmor_bprm_committed_creds, .bprm_secureexec = apparmor_bprm_secureexec, .task_setrlimit = apparmor_task_setrlimit, }; /* * AppArmor sysfs module parameters */ static int param_set_aabool(const char *val, const struct kernel_param *kp); static int param_get_aabool(char *buffer, const struct kernel_param *kp); #define param_check_aabool(name, p) __param_check(name, p, int) static struct kernel_param_ops param_ops_aabool = { .set = param_set_aabool, .get = param_get_aabool }; static int param_set_aauint(const char *val, const struct kernel_param *kp); static int param_get_aauint(char *buffer, const struct kernel_param *kp); #define param_check_aauint(name, p) __param_check(name, p, int) static struct kernel_param_ops param_ops_aauint = { .set = param_set_aauint, .get = param_get_aauint }; static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp); static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp); #define param_check_aalockpolicy(name, p) __param_check(name, p, int) static struct kernel_param_ops param_ops_aalockpolicy = { .set = param_set_aalockpolicy, .get = param_get_aalockpolicy }; static int param_set_audit(const char *val, struct kernel_param *kp); static int param_get_audit(char *buffer, struct kernel_param *kp); static int param_set_mode(const char *val, struct kernel_param *kp); static int param_get_mode(char *buffer, struct kernel_param *kp); /* Flag values, also controllable via /sys/module/apparmor/parameters * We define special types as we want to do additional mediation. 
*/ /* AppArmor global enforcement switch - complain, enforce, kill */ enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE; module_param_call(mode, param_set_mode, param_get_mode, &aa_g_profile_mode, S_IRUSR | S_IWUSR); /* Debug mode */ int aa_g_debug; module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR); /* Audit mode */ enum audit_mode aa_g_audit; module_param_call(audit, param_set_audit, param_get_audit, &aa_g_audit, S_IRUSR | S_IWUSR); /* Determines if audit header is included in audited messages. This * provides more context if the audit daemon is not running */ int aa_g_audit_header = 1; module_param_named(audit_header, aa_g_audit_header, aabool, S_IRUSR | S_IWUSR); /* lock out loading/removal of policy * TODO: add in at boot loading of policy, which is the only way to * load policy, if lock_policy is set */ int aa_g_lock_policy; module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy, S_IRUSR | S_IWUSR); /* Syscall logging mode */ int aa_g_logsyscall; module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR); /* Maximum pathname length before accesses will start getting rejected */ unsigned int aa_g_path_max = 2 * PATH_MAX; module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR); /* Determines how paranoid loading of policy is and how much verification * on the loaded policy is done. */ int aa_g_paranoid_load = 1; module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUSR | S_IWUSR); /* Boot time disable flag */ static unsigned int apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE; module_param_named(enabled, apparmor_enabled, aabool, S_IRUSR); static int __init apparmor_enabled_setup(char *str) { unsigned long enabled; int error = strict_strtoul(str, 0, &enabled); if (!error) apparmor_enabled = enabled ? 
1 : 0; return 1; } __setup("apparmor=", apparmor_enabled_setup); /* set global flag turning off the ability to load policy */ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (aa_g_lock_policy) return -EACCES; return param_set_bool(val, kp); } static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_get_bool(buffer, kp); } static int param_set_aabool(const char *val, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_set_bool(val, kp); } static int param_get_aabool(char *buffer, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_get_bool(buffer, kp); } static int param_set_aauint(const char *val, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_set_uint(val, kp); } static int param_get_aauint(char *buffer, const struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; return param_get_uint(buffer, kp); } static int param_get_audit(char *buffer, struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]); } static int param_set_audit(const char *val, struct kernel_param *kp) { int i; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; if (!val) return -EINVAL; for (i = 0; i < AUDIT_MAX_INDEX; i++) { if (strcmp(val, audit_mode_names[i]) == 0) { aa_g_audit = i; return 0; } } return -EINVAL; } static int param_get_mode(char *buffer, struct kernel_param *kp) { if (!capable(CAP_MAC_ADMIN)) return -EPERM; if (!apparmor_enabled) return -EINVAL; return sprintf(buffer, "%s", profile_mode_names[aa_g_profile_mode]); } static int param_set_mode(const char *val, struct kernel_param *kp) { int i; if (!capable(CAP_MAC_ADMIN)) return -EPERM; if 
(!apparmor_enabled) return -EINVAL; if (!val) return -EINVAL; for (i = 0; i < APPARMOR_NAMES_MAX_INDEX; i++) { if (strcmp(val, profile_mode_names[i]) == 0) { aa_g_profile_mode = i; return 0; } } return -EINVAL; } /* * AppArmor init functions */ /** * set_init_cxt - set a task context and profile on the first task. * * TODO: allow setting an alternate profile than unconfined */ static int __init set_init_cxt(void) { struct cred *cred = (struct cred *)current->real_cred; struct aa_task_cxt *cxt; cxt = aa_alloc_task_context(GFP_KERNEL); if (!cxt) return -ENOMEM; cxt->profile = aa_get_profile(root_ns->unconfined); cred->security = cxt; return 0; } static int __init apparmor_init(void) { int error; if (!apparmor_enabled || !security_module_enable(&apparmor_ops)) { aa_info_message("AppArmor disabled by boot time parameter"); apparmor_enabled = 0; return 0; } error = aa_alloc_root_ns(); if (error) { AA_ERROR("Unable to allocate default profile namespace\n"); goto alloc_out; } error = set_init_cxt(); if (error) { AA_ERROR("Failed to set context on init task\n"); goto register_security_out; } error = register_security(&apparmor_ops); if (error) { AA_ERROR("Unable to register AppArmor\n"); goto set_init_cxt_out; } /* Report that AppArmor successfully initialized */ apparmor_initialized = 1; if (aa_g_profile_mode == APPARMOR_COMPLAIN) aa_info_message("AppArmor initialized: complain mode enabled"); else if (aa_g_profile_mode == APPARMOR_KILL) aa_info_message("AppArmor initialized: kill mode enabled"); else aa_info_message("AppArmor initialized"); return error; set_init_cxt_out: aa_free_task_context(current->real_cred->security); register_security_out: aa_free_root_ns(); alloc_out: aa_destroy_aafs(); apparmor_enabled = 0; return error; } security_initcall(apparmor_init);
./CrossVul/dataset_final_sorted/CWE-20/c/bad_3515_0
crossvul-cpp_data_good_2891_1
/* Key type used to cache DNS lookups made by the kernel * * See Documentation/networking/dns_resolver.txt * * Copyright (c) 2007 Igor Mammedov * Author(s): Igor Mammedov (niallain@gmail.com) * Steve French (sfrench@us.ibm.com) * Wang Lei (wang840925@gmail.com) * David Howells (dhowells@redhat.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/keyctl.h> #include <linux/err.h> #include <linux/seq_file.h> #include <keys/dns_resolver-type.h> #include <keys/user-type.h> #include "internal.h" MODULE_DESCRIPTION("DNS Resolver"); MODULE_AUTHOR("Wang Lei"); MODULE_LICENSE("GPL"); unsigned int dns_resolver_debug; module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "DNS Resolver debugging mask"); const struct cred *dns_resolver_cache; #define DNS_ERRORNO_OPTION "dnserror" /* * Preparse instantiation data for a dns_resolver key. * * The data must be a NUL-terminated string, with the NUL char accounted in * datalen. * * If the data contains a '#' characters, then we take the clause after each * one to be an option of the form 'key=value'. The actual data of interest is * the string leading up to the first '#'. 
For instance: * * "ip1,ip2,...#foo=bar" */ static int dns_resolver_preparse(struct key_preparsed_payload *prep) { struct user_key_payload *upayload; unsigned long derrno; int ret; int datalen = prep->datalen, result_len = 0; const char *data = prep->data, *end, *opt; kenter("'%*.*s',%u", datalen, datalen, data, datalen); if (datalen <= 1 || !data || data[datalen - 1] != '\0') return -EINVAL; datalen--; /* deal with any options embedded in the data */ end = data + datalen; opt = memchr(data, '#', datalen); if (!opt) { /* no options: the entire data is the result */ kdebug("no options"); result_len = datalen; } else { const char *next_opt; result_len = opt - data; opt++; kdebug("options: '%s'", opt); do { const char *eq; int opt_len, opt_nlen, opt_vlen, tmp; next_opt = memchr(opt, '#', end - opt) ?: end; opt_len = next_opt - opt; if (!opt_len) { printk(KERN_WARNING "Empty option to dns_resolver key\n"); return -EINVAL; } eq = memchr(opt, '=', opt_len) ?: end; opt_nlen = eq - opt; eq++; opt_vlen = next_opt - eq; /* will be -1 if no value */ tmp = opt_vlen >= 0 ? opt_vlen : 0; kdebug("option '%*.*s' val '%*.*s'", opt_nlen, opt_nlen, opt, tmp, tmp, eq); /* see if it's an error number representing a DNS error * that's to be recorded as the result in this key */ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { kdebug("dns error number option"); if (opt_vlen <= 0) goto bad_option_value; ret = kstrtoul(eq, 10, &derrno); if (ret < 0) goto bad_option_value; if (derrno < 1 || derrno > 511) goto bad_option_value; kdebug("dns error no. 
= %lu", derrno); prep->payload.data[dns_key_error] = ERR_PTR(-derrno); continue; } bad_option_value: printk(KERN_WARNING "Option '%*.*s' to dns_resolver key:" " bad/missing value\n", opt_nlen, opt_nlen, opt); return -EINVAL; } while (opt = next_opt + 1, opt < end); } /* don't cache the result if we're caching an error saying there's no * result */ if (prep->payload.data[dns_key_error]) { kleave(" = 0 [h_error %ld]", PTR_ERR(prep->payload.data[dns_key_error])); return 0; } kdebug("store result"); prep->quotalen = result_len; upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL); if (!upayload) { kleave(" = -ENOMEM"); return -ENOMEM; } upayload->datalen = result_len; memcpy(upayload->data, data, result_len); upayload->data[result_len] = '\0'; prep->payload.data[dns_key_data] = upayload; kleave(" = 0"); return 0; } /* * Clean up the preparse data */ static void dns_resolver_free_preparse(struct key_preparsed_payload *prep) { pr_devel("==>%s()\n", __func__); kfree(prep->payload.data[dns_key_data]); } /* * The description is of the form "[<type>:]<domain_name>" * * The domain name may be a simple name or an absolute domain name (which * should end with a period). The domain name is case-independent. */ static bool dns_resolver_cmp(const struct key *key, const struct key_match_data *match_data) { int slen, dlen, ret = 0; const char *src = key->description, *dsp = match_data->raw_data; kenter("%s,%s", src, dsp); if (!src || !dsp) goto no_match; if (strcasecmp(src, dsp) == 0) goto matched; slen = strlen(src); dlen = strlen(dsp); if (slen <= 0 || dlen <= 0) goto no_match; if (src[slen - 1] == '.') slen--; if (dsp[dlen - 1] == '.') dlen--; if (slen != dlen || strncasecmp(src, dsp, slen) != 0) goto no_match; matched: ret = 1; no_match: kleave(" = %d", ret); return ret; } /* * Preparse the match criterion. 
*/ static int dns_resolver_match_preparse(struct key_match_data *match_data) { match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE; match_data->cmp = dns_resolver_cmp; return 0; } /* * Describe a DNS key */ static void dns_resolver_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); if (key_is_positive(key)) { int err = PTR_ERR(key->payload.data[dns_key_error]); if (err) seq_printf(m, ": %d", err); else seq_printf(m, ": %u", key->datalen); } } /* * read the DNS data * - the key's semaphore is read-locked */ static long dns_resolver_read(const struct key *key, char __user *buffer, size_t buflen) { int err = PTR_ERR(key->payload.data[dns_key_error]); if (err) return err; return user_read(key, buffer, buflen); } struct key_type key_type_dns_resolver = { .name = "dns_resolver", .preparse = dns_resolver_preparse, .free_preparse = dns_resolver_free_preparse, .instantiate = generic_key_instantiate, .match_preparse = dns_resolver_match_preparse, .revoke = user_revoke, .destroy = user_destroy, .describe = dns_resolver_describe, .read = dns_resolver_read, }; static int __init init_dns_resolver(void) { struct cred *cred; struct key *keyring; int ret; /* create an override credential set with a special thread keyring in * which DNS requests are cached * * this is used to prevent malicious redirections from being installed * with add_key(). 
*/ cred = prepare_kernel_cred(NULL); if (!cred) return -ENOMEM; keyring = keyring_alloc(".dns_resolver", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; } ret = register_key_type(&key_type_dns_resolver); if (ret < 0) goto failed_put_key; /* instruct request_key() to use this special keyring as a cache for * the results it looks up */ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; dns_resolver_cache = cred; kdebug("DNS resolver keyring: %d\n", key_serial(keyring)); return 0; failed_put_key: key_put(keyring); failed_put_cred: put_cred(cred); return ret; } static void __exit exit_dns_resolver(void) { key_revoke(dns_resolver_cache->thread_keyring); unregister_key_type(&key_type_dns_resolver); put_cred(dns_resolver_cache); } module_init(init_dns_resolver) module_exit(exit_dns_resolver) MODULE_LICENSE("GPL");
./CrossVul/dataset_final_sorted/CWE-20/c/good_2891_1
crossvul-cpp_data_good_5845_17
/*
 * IUCV protocol stack for Linux on zSeries
 *
 * Copyright IBM Corp. 2006, 2009
 *
 * Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 * PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* Interface to the z/VM IUCV base transport; NULL when only the
 * HiperSockets transport is available (presumably set at module init /
 * iucv base registration, which is outside this chunk — TODO confirm). */
static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

/* Sleep until @condition becomes true, @timeo expires, or a signal /
 * socket error interrupts the wait.  NOTE: drops and reacquires the
 * socket lock around schedule_timeout(), so socket state may change
 * across each iteration — callers must re-check state afterwards. */
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

/* Convenience wrapper: only enter the wait loop if @condition is not
 * already satisfied; evaluates to 0 on success or a negative errno. */
#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

/* Global list of all AF_IUCV sockets, protected by its rwlock;
 * autobind_name feeds iucv_sock_autobind() with unique names. */
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

/* Callbacks registered with the IUCV base for VM-transport paths. */
static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

/* Copy 8 bytes into the first half of a 16-byte IUCV user-data field. */
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

/* Copy 8 bytes into the second half of a 16-byte IUCV user-data field. */
static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

/* PM prepare callback: nothing to do beyond optional debug output. */
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

/* PM complete callback: nothing to do beyond optional debug output. */
static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication pathes
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			/* active path: sever without user data */
			iucv_sever_path(sk, 0);
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
		/* drop any queued traffic regardless of state */
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			/* path was severed during freeze: signal EPIPE and
			 * move to DISCONN so userspace notices */
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater 7, then PRMDATA can be used for special
 * notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state:	second iucv sk state
 *
 * Returns true if the socket in either in the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		/* VM transport: bound by the path's message limit */
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		/* HiperSockets: bound by the peer's advertised window and
		 * no pending tx notifications */
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 *
 * Builds the AF_IUCV transport header in front of the payload, piggybacks
 * the current receive-window count onto WIN/data frames, and transmits via
 * dev_queue_xmit().  A clone of the skb is kept on send_skb_q until the
 * tx notification (afiucv_hs_callback_txnotify) releases it.
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		/* confirm received messages back to the peer */
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	/* IDs travel in EBCDIC on the wire */
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	/* clone stays queued until the device reports tx completion */
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		/* window count was confirmed to the peer; reset it */
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);
}

/* Look up a socket by its 8-byte source name.
 * Caller must hold iucv_sk_list.lock. */
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

/* sk_destruct callback: final per-socket cleanup and sanity checks. */
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		/* clear iucv->path first so concurrent users see it gone */
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/* Send FIN through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;

	/* control frame carries only the transport header */
	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	return err;
}

/* Close an IUCV socket
 *
 * NOTE: the switch below deliberately falls through state by state:
 * CONNECTED -> DISCONN -> CLOSING -> CLOSED -> sever path. */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			/* wait for queued sends to drain (linger) */
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

/* Inherit the socket type from the accepting (parent) socket, if any. */
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

/* Allocate and initialize a new AF_IUCV sock; links it into the global
 * socket list.  Transport defaults to VM IUCV when available, otherwise
 * HiperSockets. */
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	/* clears src_user_id, src_name, dst_user_id, dst_name (4 * 8 bytes
	 * laid out contiguously — TODO confirm against struct iucv_sock) */
	memset(&iucv->src_user_id , 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

/* Add a socket to the global AF_IUCV socket list. */
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

/* Remove a socket from the global AF_IUCV socket list. */
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

/* Queue a freshly connected child socket on the parent's accept queue. */
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock
			 *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

/* Detach a child socket from its parent's accept queue and drop the
 * reference taken by iucv_accept_enqueue(). */
void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

/* Pop the next usable child socket off @parent's accept queue.
 * CLOSED children are unlinked and skipped; with @newsock == NULL every
 * queued child is returned (used by cleanup_listen). */
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport: match the requested user id against a
	 * device's permanent HW address */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_name, sa->siucv_name, 8);
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char name[12];
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);

	/* generate a unique 8-hex-digit name from a global counter */
	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

/* Establish a VM IUCV path to the peer named in @addr; maps IUCV error
 * codes to errnos. */
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		/* wait for connack / disconn from the peer */
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* listener may have been closed while we slept */
		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

/* Report local (peer == 0) or remote (peer != 0) address of the socket. */
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	/* zero the unused fields so no kernel stack data leaks out */
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}

/* sendmsg handler: validates flags and cmsgs, copies the user iovec into
 * one skb per message, waits below the message limit, then sends either
 * via the HiperSockets transport or the VM IUCV path (IPRM fast path for
 * payloads <= 7 bytes). */
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
			break;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb = sock_alloc_send_skb(sk,
			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
			noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb) {
		err = -ENOMEM;
		goto out;
	}
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		/* leave headroom for the transport header built in
		 * afiucv_hs_send() */
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if outstanding messages for iucv path has reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
		goto release;
	}
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	      && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			pr_iucv->path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
				" exceeds message limit\n",
				appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

release:
	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		/* cap each fragment at a quarter of the receive buffer */
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g.
	   iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			/* shutdown notification: deliver a zero-length skb */
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				pr_iucv->path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		/* stop as soon as the receive queue is backlogged again */
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

/* recvmsg handler: supports MSG_PEEK, partial SOCK_STREAM reads via the
 * per-skb offset in IUCV_SKB_CB, delivery of the target class as a cmsg,
 * backlog/message-queue draining, and (HiperSockets) receive-window
 * confirmation once half the message limit is consumed.
 * NOTE(review): msg_name/msg_namelen are untouched here — assumes the
 * common socket layer zeroes msg_namelen for address-less protocols. */
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		/* zero-length skb signals peer shutdown (IPRM message) */
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				/* peer overran our advertised window */
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				/* confirm consumed window to the peer */
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

/* Report POLLIN when any queued child connection is established. */
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

/* poll handler: translates socket / iucv state into poll event bits. */
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ?
			 POLLPRI : 0);
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= POLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

/* shutdown handler: sends the IPRM shutdown message (VM transport) or a
 * SHT control frame (HiperSockets) for the send direction, and quiesces
 * the path / purges the receive queue for the receive direction. */
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;	/* map 0..2 to the 1..3 SHUT bit mask */

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

/* release handler: close the socket, detach it from its file, and free
 * it once zapped and orphaned. */
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			/* limit can only be tuned before a path exists */
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		/* max payload per message on this transport */
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

/* Callback wrappers - called from iucv base support
 *
 * path_pending callback: an incoming VM IUCV connection request.  Finds
 * the matching listening socket, creates a child socket, accepts the
 * path, and queues the child on the listener's accept queue.
 * NOTE: always returns 0 after the listener lookup succeeds, even when
 * the request is rejected (the path is severed in those branches). */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

/* path_complete callback: the peer accepted our connection request. */
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) { struct sock *sk = path->private; struct iucv_sock *iucv = iucv_sk(sk); struct sk_buff *skb; struct sock_msg_q *save_msg; int len; if (sk->sk_shutdown & RCV_SHUTDOWN) { pr_iucv->message_reject(path, msg); return; } spin_lock(&iucv->message_q.lock); if (!list_empty(&iucv->message_q.list) || !skb_queue_empty(&iucv->backlog_skb_q)) goto save_message; len = atomic_read(&sk->sk_rmem_alloc); len += SKB_TRUESIZE(iucv_msg_length(msg)); if (len > sk->sk_rcvbuf) goto save_message; skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA); if (!skb) goto save_message; iucv_process_message(sk, skb, path, msg); goto out_unlock; save_message: save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); if (!save_msg) goto out_unlock; save_msg->path = path; save_msg->msg = *msg; list_add_tail(&save_msg->list, &iucv->message_q.list); out_unlock: spin_unlock(&iucv->message_q.lock); } static void iucv_callback_txdone(struct iucv_path *path, struct iucv_message *msg) { struct sock *sk = path->private; struct sk_buff *this = NULL; struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; struct sk_buff *list_skb = list->next; unsigned long flags; bh_lock_sock(sk); if (!skb_queue_empty(list)) { spin_lock_irqsave(&list->lock, flags); while (list_skb != (struct sk_buff *)list) { if (msg->tag != IUCV_SKB_CB(list_skb)->tag) { this = list_skb; break; } list_skb = list_skb->next; } if (this) __skb_unlink(this, list); spin_unlock_irqrestore(&list->lock, flags); if (this) { kfree_skb(this); /* wake up any process waiting for sending */ iucv_sock_wake_msglim(sk); } } if (sk->sk_state == IUCV_CLOSING) { if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { sk->sk_state = IUCV_CLOSED; sk->sk_state_change(sk); } } bh_unlock_sock(sk); } static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) { struct sock *sk = path->private; if (sk->sk_state == IUCV_CLOSED) return; bh_lock_sock(sk); 
iucv_sever_path(sk, 1); sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); bh_unlock_sock(sk); } /* called if the other communication side shuts down its RECV direction; * in turn, the callback sets SEND_SHUTDOWN to disable sending of data. */ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) { struct sock *sk = path->private; bh_lock_sock(sk); if (sk->sk_state != IUCV_CLOSED) { sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); } bh_unlock_sock(sk); } /***************** HiperSockets transport callbacks ********************/ static void afiucv_swap_src_dest(struct sk_buff *skb) { struct af_iucv_trans_hdr *trans_hdr = (struct af_iucv_trans_hdr *)skb->data; char tmpID[8]; char tmpName[8]; ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); memcpy(tmpID, trans_hdr->srcUserID, 8); memcpy(tmpName, trans_hdr->srcAppName, 8); memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8); memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8); memcpy(trans_hdr->destUserID, tmpID, 8); memcpy(trans_hdr->destAppName, tmpName, 8); skb_push(skb, ETH_HLEN); memset(skb->data, 0, ETH_HLEN); } /** * afiucv_hs_callback_syn - react on received SYN **/ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb) { struct sock *nsk; struct iucv_sock *iucv, *niucv; struct af_iucv_trans_hdr *trans_hdr; int err; iucv = iucv_sk(sk); trans_hdr = (struct af_iucv_trans_hdr *)skb->data; if (!iucv) { /* no sock - connection refused */ afiucv_swap_src_dest(skb); trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; err = dev_queue_xmit(skb); goto out; } nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); bh_lock_sock(sk); if ((sk->sk_state != IUCV_LISTEN) || sk_acceptq_is_full(sk) || !nsk) { /* error on server socket - connection refused */ if (nsk) 
sk_free(nsk); afiucv_swap_src_dest(skb); trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN; err = dev_queue_xmit(skb); bh_unlock_sock(sk); goto out; } niucv = iucv_sk(nsk); iucv_sock_init(nsk, sk); niucv->transport = AF_IUCV_TRANS_HIPER; niucv->msglimit = iucv->msglimit; if (!trans_hdr->window) niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT; else niucv->msglimit_peer = trans_hdr->window; memcpy(niucv->dst_name, trans_hdr->srcAppName, 8); memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8); memcpy(niucv->src_name, iucv->src_name, 8); memcpy(niucv->src_user_id, iucv->src_user_id, 8); nsk->sk_bound_dev_if = sk->sk_bound_dev_if; niucv->hs_dev = iucv->hs_dev; dev_hold(niucv->hs_dev); afiucv_swap_src_dest(skb); trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK; trans_hdr->window = niucv->msglimit; /* if receiver acks the xmit connection is established */ err = dev_queue_xmit(skb); if (!err) { iucv_accept_enqueue(sk, nsk); nsk->sk_state = IUCV_CONNECTED; sk->sk_data_ready(sk, 1); } else iucv_sock_kill(nsk); bh_unlock_sock(sk); out: return NET_RX_SUCCESS; } /** * afiucv_hs_callback_synack() - react on received SYN-ACK **/ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); struct af_iucv_trans_hdr *trans_hdr = (struct af_iucv_trans_hdr *)skb->data; if (!iucv) goto out; if (sk->sk_state != IUCV_BOUND) goto out; bh_lock_sock(sk); iucv->msglimit_peer = trans_hdr->window; sk->sk_state = IUCV_CONNECTED; sk->sk_state_change(sk); bh_unlock_sock(sk); out: kfree_skb(skb); return NET_RX_SUCCESS; } /** * afiucv_hs_callback_synfin() - react on received SYN_FIN **/ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); if (!iucv) goto out; if (sk->sk_state != IUCV_BOUND) goto out; bh_lock_sock(sk); sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); bh_unlock_sock(sk); out: kfree_skb(skb); return NET_RX_SUCCESS; } /** * afiucv_hs_callback_fin() - 
react on received FIN **/ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); /* other end of connection closed */ if (!iucv) goto out; bh_lock_sock(sk); if (sk->sk_state == IUCV_CONNECTED) { sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } bh_unlock_sock(sk); out: kfree_skb(skb); return NET_RX_SUCCESS; } /** * afiucv_hs_callback_win() - react on received WIN **/ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); struct af_iucv_trans_hdr *trans_hdr = (struct af_iucv_trans_hdr *)skb->data; if (!iucv) return NET_RX_SUCCESS; if (sk->sk_state != IUCV_CONNECTED) return NET_RX_SUCCESS; atomic_sub(trans_hdr->window, &iucv->msg_sent); iucv_sock_wake_msglim(sk); return NET_RX_SUCCESS; } /** * afiucv_hs_callback_rx() - react on received data **/ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) { struct iucv_sock *iucv = iucv_sk(sk); if (!iucv) { kfree_skb(skb); return NET_RX_SUCCESS; } if (sk->sk_state != IUCV_CONNECTED) { kfree_skb(skb); return NET_RX_SUCCESS; } if (sk->sk_shutdown & RCV_SHUTDOWN) { kfree_skb(skb); return NET_RX_SUCCESS; } /* write stuff from iucv_msg to skb cb */ if (skb->len < sizeof(struct af_iucv_trans_hdr)) { kfree_skb(skb); return NET_RX_SUCCESS; } skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); skb_reset_transport_header(skb); skb_reset_network_header(skb); IUCV_SKB_CB(skb)->offset = 0; spin_lock(&iucv->message_q.lock); if (skb_queue_empty(&iucv->backlog_skb_q)) { if (sock_queue_rcv_skb(sk, skb)) { /* handle rcv queue full */ skb_queue_tail(&iucv->backlog_skb_q, skb); } } else skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb); spin_unlock(&iucv->message_q.lock); return NET_RX_SUCCESS; } /** * afiucv_hs_rcv() - base function for arriving data through HiperSockets * transport * called from netif RX softirq **/ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, 
struct net_device *orig_dev) { struct sock *sk; struct iucv_sock *iucv; struct af_iucv_trans_hdr *trans_hdr; char nullstring[8]; int err = 0; skb_pull(skb, ETH_HLEN); trans_hdr = (struct af_iucv_trans_hdr *)skb->data; EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); memset(nullstring, 0, sizeof(nullstring)); iucv = NULL; sk = NULL; read_lock(&iucv_sk_list.lock); sk_for_each(sk, &iucv_sk_list.head) { if (trans_hdr->flags == AF_IUCV_FLAG_SYN) { if ((!memcmp(&iucv_sk(sk)->src_name, trans_hdr->destAppName, 8)) && (!memcmp(&iucv_sk(sk)->src_user_id, trans_hdr->destUserID, 8)) && (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) && (!memcmp(&iucv_sk(sk)->dst_user_id, nullstring, 8))) { iucv = iucv_sk(sk); break; } } else { if ((!memcmp(&iucv_sk(sk)->src_name, trans_hdr->destAppName, 8)) && (!memcmp(&iucv_sk(sk)->src_user_id, trans_hdr->destUserID, 8)) && (!memcmp(&iucv_sk(sk)->dst_name, trans_hdr->srcAppName, 8)) && (!memcmp(&iucv_sk(sk)->dst_user_id, trans_hdr->srcUserID, 8))) { iucv = iucv_sk(sk); break; } } } read_unlock(&iucv_sk_list.lock); if (!iucv) sk = NULL; /* no sock how should we send with no sock 1) send without sock no send rc checking? 2) introduce default sock to handle this cases SYN -> send SYN|ACK in good case, send SYN|FIN in bad case data -> send FIN SYN|ACK, SYN|FIN, FIN -> no action? 
*/ switch (trans_hdr->flags) { case AF_IUCV_FLAG_SYN: /* connect request */ err = afiucv_hs_callback_syn(sk, skb); break; case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK): /* connect request confirmed */ err = afiucv_hs_callback_synack(sk, skb); break; case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN): /* connect request refused */ err = afiucv_hs_callback_synfin(sk, skb); break; case (AF_IUCV_FLAG_FIN): /* close request */ err = afiucv_hs_callback_fin(sk, skb); break; case (AF_IUCV_FLAG_WIN): err = afiucv_hs_callback_win(sk, skb); if (skb->len == sizeof(struct af_iucv_trans_hdr)) { kfree_skb(skb); break; } /* fall through and receive non-zero length data */ case (AF_IUCV_FLAG_SHT): /* shutdown request */ /* fall through and receive zero length data */ case 0: /* plain data frame */ IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; err = afiucv_hs_callback_rx(sk, skb); break; default: ; } return err; } /** * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets * transport **/ static void afiucv_hs_callback_txnotify(struct sk_buff *skb, enum iucv_tx_notify n) { struct sock *isk = skb->sk; struct sock *sk = NULL; struct iucv_sock *iucv = NULL; struct sk_buff_head *list; struct sk_buff *list_skb; struct sk_buff *nskb; unsigned long flags; read_lock_irqsave(&iucv_sk_list.lock, flags); sk_for_each(sk, &iucv_sk_list.head) if (sk == isk) { iucv = iucv_sk(sk); break; } read_unlock_irqrestore(&iucv_sk_list.lock, flags); if (!iucv || sock_flag(sk, SOCK_ZAPPED)) return; list = &iucv->send_skb_q; spin_lock_irqsave(&list->lock, flags); if (skb_queue_empty(list)) goto out_unlock; list_skb = list->next; nskb = list_skb->next; while (list_skb != (struct sk_buff *)list) { if (skb_shinfo(list_skb) == skb_shinfo(skb)) { switch (n) { case TX_NOTIFY_OK: __skb_unlink(list_skb, list); kfree_skb(list_skb); iucv_sock_wake_msglim(sk); break; case TX_NOTIFY_PENDING: atomic_inc(&iucv->pendings); break; case TX_NOTIFY_DELAYED_OK: __skb_unlink(list_skb, list); 
atomic_dec(&iucv->pendings); if (atomic_read(&iucv->pendings) <= 0) iucv_sock_wake_msglim(sk); kfree_skb(list_skb); break; case TX_NOTIFY_UNREACHABLE: case TX_NOTIFY_DELAYED_UNREACHABLE: case TX_NOTIFY_TPQFULL: /* not yet used */ case TX_NOTIFY_GENERALERROR: case TX_NOTIFY_DELAYED_GENERALERROR: __skb_unlink(list_skb, list); kfree_skb(list_skb); if (sk->sk_state == IUCV_CONNECTED) { sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } break; } break; } list_skb = nskb; nskb = nskb->next; } out_unlock: spin_unlock_irqrestore(&list->lock, flags); if (sk->sk_state == IUCV_CLOSING) { if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { sk->sk_state = IUCV_CLOSED; sk->sk_state_change(sk); } } } /* * afiucv_netdev_event: handle netdev notifier chain events */ static int afiucv_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); struct sock *sk; struct iucv_sock *iucv; switch (event) { case NETDEV_REBOOT: case NETDEV_GOING_DOWN: sk_for_each(sk, &iucv_sk_list.head) { iucv = iucv_sk(sk); if ((iucv->hs_dev == event_dev) && (sk->sk_state == IUCV_CONNECTED)) { if (event == NETDEV_GOING_DOWN) iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } } break; case NETDEV_DOWN: case NETDEV_UNREGISTER: default: break; } return NOTIFY_DONE; } static struct notifier_block afiucv_netdev_notifier = { .notifier_call = afiucv_netdev_event, }; static const struct proto_ops iucv_sock_ops = { .family = PF_IUCV, .owner = THIS_MODULE, .release = iucv_sock_release, .bind = iucv_sock_bind, .connect = iucv_sock_connect, .listen = iucv_sock_listen, .accept = iucv_sock_accept, .getname = iucv_sock_getname, .sendmsg = iucv_sock_sendmsg, .recvmsg = iucv_sock_recvmsg, .poll = iucv_sock_poll, .ioctl = sock_no_ioctl, .mmap = sock_no_mmap, .socketpair = sock_no_socketpair, .shutdown = iucv_sock_shutdown, .setsockopt = iucv_sock_setsockopt, .getsockopt = iucv_sock_getsockopt, }; 
/* Protocol family registration: routes socket(AF_IUCV, ...) to
 * iucv_sock_create.
 */
static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

/* Ethertype hook for the HiperSockets transport: frames of type
 * ETH_P_AF_IUCV are delivered to afiucv_hs_rcv from the netif RX softirq.
 */
static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};

/* afiucv_iucv_init() - set up the classic (VM) IUCV transport.
 *
 * Registers the af_iucv handler with the IUCV base layer and creates a
 * dummy driver/device pair.  Unwinds in reverse order on any failure and
 * returns 0 or a negative errno.
 */
static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	/* the device is freed by the driver core calling ->release */
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		/* NOTE(review): after a failed device_register() the device
		 * should normally be dropped with put_device(), not leaked;
		 * af_iucv_dev is not freed on this path -- TODO confirm.
		 */
		goto out_driver;
	return 0;

out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}

/* afiucv_init() - module entry point.
 *
 * On z/VM guests, resolves the local user id and binds to the IUCV base
 * module; otherwise only the HiperSockets transport is available.  Then
 * registers the protocol, the socket family, and either the IUCV transport
 * or (HiperSockets-only) a netdev notifier, and finally the packet type.
 */
static int __init afiucv_init(void)
{
	int err;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
		if (unlikely(err)) {
			WARN_ON(err);
			err = -EPROTONOSUPPORT;
			goto out;
		}

		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
		if (!pr_iucv) {
			printk(KERN_WARNING "iucv_if lookup failed\n");
			memset(&iucv_userid, 0, sizeof(iucv_userid));
		}
	} else {
		memset(&iucv_userid, 0, sizeof(iucv_userid));
		pr_iucv = NULL;
	}

	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;

	if (pr_iucv) {
		err = afiucv_iucv_init();
		if (err)
			goto out_sock;
	} else
		/* no classic IUCV: watch devices for the HS transport only */
		register_netdevice_notifier(&afiucv_netdev_notifier);
	dev_add_pack(&iucv_packet_type);
	return 0;

out_sock:
	sock_unregister(PF_IUCV);
out_proto:
	proto_unregister(&iucv_proto);
out:
	/* drop the iucv_if reference taken by symbol_get() above */
	if (pr_iucv)
		symbol_put(iucv_if);
	return err;
}

/* afiucv_exit() - module exit: tear down in reverse order of afiucv_init. */
static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		device_unregister(af_iucv_dev);
		driver_unregister(&af_iucv_driver);
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
		symbol_put(iucv_if);
	} else
		unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
./CrossVul/dataset_final_sorted/CWE-20/c/good_5845_17
crossvul-cpp_data_bad_5184_0
404: Not Found
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5184_0
crossvul-cpp_data_bad_3547_4
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/rose.h> static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose); /* * This routine purges all of the queues of frames. */ void rose_clear_queues(struct sock *sk) { skb_queue_purge(&sk->sk_write_queue); skb_queue_purge(&rose_sk(sk)->ack_queue); } /* * This routine purges the input queue of those frames that have been * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the * SDL diagram. */ void rose_frames_acked(struct sock *sk, unsigned short nr) { struct sk_buff *skb; struct rose_sock *rose = rose_sk(sk); /* * Remove all the ack-ed frames from the ack queue. */ if (rose->va != nr) { while (skb_peek(&rose->ack_queue) != NULL && rose->va != nr) { skb = skb_dequeue(&rose->ack_queue); kfree_skb(skb); rose->va = (rose->va + 1) % ROSE_MODULUS; } } } void rose_requeue_frames(struct sock *sk) { struct sk_buff *skb, *skb_prev = NULL; /* * Requeue all the un-ack-ed frames on the output queue to be picked * up by rose_kick. This arrangement handles the possibility of an * empty output queue. 
*/ while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) { if (skb_prev == NULL) skb_queue_head(&sk->sk_write_queue, skb); else skb_append(skb_prev, skb, &sk->sk_write_queue); skb_prev = skb; } } /* * Validate that the value of nr is between va and vs. Return true or * false for testing. */ int rose_validate_nr(struct sock *sk, unsigned short nr) { struct rose_sock *rose = rose_sk(sk); unsigned short vc = rose->va; while (vc != rose->vs) { if (nr == vc) return 1; vc = (vc + 1) % ROSE_MODULUS; } return nr == rose->vs; } /* * This routine is called when the packet layer internally generates a * control frame. */ void rose_write_internal(struct sock *sk, int frametype) { struct rose_sock *rose = rose_sk(sk); struct sk_buff *skb; unsigned char *dptr; unsigned char lci1, lci2; char buffer[100]; int len, faclen = 0; len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1; switch (frametype) { case ROSE_CALL_REQUEST: len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN; faclen = rose_create_facilities(buffer, rose); len += faclen; break; case ROSE_CALL_ACCEPTED: case ROSE_CLEAR_REQUEST: case ROSE_RESET_REQUEST: len += 2; break; } if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL) return; /* * Space for AX.25 header and PID. 
*/ skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1); dptr = skb_put(skb, skb_tailroom(skb)); lci1 = (rose->lci >> 8) & 0x0F; lci2 = (rose->lci >> 0) & 0xFF; switch (frametype) { case ROSE_CALL_REQUEST: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = 0xAA; memcpy(dptr, &rose->dest_addr, ROSE_ADDR_LEN); dptr += ROSE_ADDR_LEN; memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN); dptr += ROSE_ADDR_LEN; memcpy(dptr, buffer, faclen); dptr += faclen; break; case ROSE_CALL_ACCEPTED: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = 0x00; /* Address length */ *dptr++ = 0; /* Facilities length */ break; case ROSE_CLEAR_REQUEST: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = rose->cause; *dptr++ = rose->diagnostic; break; case ROSE_RESET_REQUEST: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; *dptr++ = ROSE_DTE_ORIGINATED; *dptr++ = 0; break; case ROSE_RR: case ROSE_RNR: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr = frametype; *dptr++ |= (rose->vr << 5) & 0xE0; break; case ROSE_CLEAR_CONFIRMATION: case ROSE_RESET_CONFIRMATION: *dptr++ = ROSE_GFI | lci1; *dptr++ = lci2; *dptr++ = frametype; break; default: printk(KERN_ERR "ROSE: rose_write_internal - invalid frametype %02X\n", frametype); kfree_skb(skb); return; } rose_transmit_link(skb, rose->neighbour); } int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m) { unsigned char *frame; frame = skb->data; *ns = *nr = *q = *d = *m = 0; switch (frame[2]) { case ROSE_CALL_REQUEST: case ROSE_CALL_ACCEPTED: case ROSE_CLEAR_REQUEST: case ROSE_CLEAR_CONFIRMATION: case ROSE_RESET_REQUEST: case ROSE_RESET_CONFIRMATION: return frame[2]; default: break; } if ((frame[2] & 0x1F) == ROSE_RR || (frame[2] & 0x1F) == ROSE_RNR) { *nr = (frame[2] >> 5) & 0x07; return frame[2] & 0x1F; } if ((frame[2] & 0x01) == ROSE_DATA) { *q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT; *d = (frame[0] & ROSE_D_BIT) == ROSE_D_BIT; *m = 
(frame[2] & ROSE_M_BIT) == ROSE_M_BIT; *nr = (frame[2] >> 5) & 0x07; *ns = (frame[2] >> 1) & 0x07; return ROSE_DATA; } return ROSE_ILLEGAL; } static int rose_parse_national(unsigned char *p, struct rose_facilities_struct *facilities, int len) { unsigned char *pt; unsigned char l, lg, n = 0; int fac_national_digis_received = 0; do { switch (*p & 0xC0) { case 0x00: p += 2; n += 2; len -= 2; break; case 0x40: if (*p == FAC_NATIONAL_RAND) facilities->rand = ((p[1] << 8) & 0xFF00) + ((p[2] << 0) & 0x00FF); p += 3; n += 3; len -= 3; break; case 0x80: p += 4; n += 4; len -= 4; break; case 0xC0: l = p[1]; if (*p == FAC_NATIONAL_DEST_DIGI) { if (!fac_national_digis_received) { memcpy(&facilities->source_digis[0], p + 2, AX25_ADDR_LEN); facilities->source_ndigis = 1; } } else if (*p == FAC_NATIONAL_SRC_DIGI) { if (!fac_national_digis_received) { memcpy(&facilities->dest_digis[0], p + 2, AX25_ADDR_LEN); facilities->dest_ndigis = 1; } } else if (*p == FAC_NATIONAL_FAIL_CALL) { memcpy(&facilities->fail_call, p + 2, AX25_ADDR_LEN); } else if (*p == FAC_NATIONAL_FAIL_ADD) { memcpy(&facilities->fail_addr, p + 3, ROSE_ADDR_LEN); } else if (*p == FAC_NATIONAL_DIGIS) { fac_national_digis_received = 1; facilities->source_ndigis = 0; facilities->dest_ndigis = 0; for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { if (pt[6] & AX25_HBIT) { if (facilities->dest_ndigis >= ROSE_MAX_DIGIS) return -1; memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); } else { if (facilities->source_ndigis >= ROSE_MAX_DIGIS) return -1; memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); } } } p += l + 2; n += l + 2; len -= l + 2; break; } } while (*p != 0x00 && len > 0); return n; } static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *facilities, int len) { unsigned char l, n = 0; char callsign[11]; do { switch (*p & 0xC0) { case 0x00: p += 2; n += 2; len -= 2; break; case 0x40: p += 3; n += 3; len -= 
3; break; case 0x80: p += 4; n += 4; len -= 4; break; case 0xC0: l = p[1]; /* Prevent overflows*/ if (l < 10 || l > 20) return -1; if (*p == FAC_CCITT_DEST_NSAP) { memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); memcpy(callsign, p + 12, l - 10); callsign[l - 10] = '\0'; asc2ax(&facilities->source_call, callsign); } if (*p == FAC_CCITT_SRC_NSAP) { memcpy(&facilities->dest_addr, p + 7, ROSE_ADDR_LEN); memcpy(callsign, p + 12, l - 10); callsign[l - 10] = '\0'; asc2ax(&facilities->dest_call, callsign); } p += l + 2; n += l + 2; len -= l + 2; break; } } while (*p != 0x00 && len > 0); return n; } int rose_parse_facilities(unsigned char *p, struct rose_facilities_struct *facilities) { int facilities_len, len; facilities_len = *p++; if (facilities_len == 0) return 0; while (facilities_len > 0) { if (*p == 0x00) { facilities_len--; p++; switch (*p) { case FAC_NATIONAL: /* National */ len = rose_parse_national(p + 1, facilities, facilities_len - 1); if (len < 0) return 0; facilities_len -= len + 1; p += len + 1; break; case FAC_CCITT: /* CCITT */ len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); if (len < 0) return 0; facilities_len -= len + 1; p += len + 1; break; default: printk(KERN_DEBUG "ROSE: rose_parse_facilities - unknown facilities family %02X\n", *p); facilities_len--; p++; break; } } else break; /* Error in facilities format */ } return 1; } static int rose_create_facilities(unsigned char *buffer, struct rose_sock *rose) { unsigned char *p = buffer + 1; char *callsign; char buf[11]; int len, nb; /* National Facilities */ if (rose->rand != 0 || rose->source_ndigis == 1 || rose->dest_ndigis == 1) { *p++ = 0x00; *p++ = FAC_NATIONAL; if (rose->rand != 0) { *p++ = FAC_NATIONAL_RAND; *p++ = (rose->rand >> 8) & 0xFF; *p++ = (rose->rand >> 0) & 0xFF; } /* Sent before older facilities */ if ((rose->source_ndigis > 0) || (rose->dest_ndigis > 0)) { int maxdigi = 0; *p++ = FAC_NATIONAL_DIGIS; *p++ = AX25_ADDR_LEN * (rose->source_ndigis + 
rose->dest_ndigis); for (nb = 0 ; nb < rose->source_ndigis ; nb++) { if (++maxdigi >= ROSE_MAX_DIGIS) break; memcpy(p, &rose->source_digis[nb], AX25_ADDR_LEN); p[6] |= AX25_HBIT; p += AX25_ADDR_LEN; } for (nb = 0 ; nb < rose->dest_ndigis ; nb++) { if (++maxdigi >= ROSE_MAX_DIGIS) break; memcpy(p, &rose->dest_digis[nb], AX25_ADDR_LEN); p[6] &= ~AX25_HBIT; p += AX25_ADDR_LEN; } } /* For compatibility */ if (rose->source_ndigis > 0) { *p++ = FAC_NATIONAL_SRC_DIGI; *p++ = AX25_ADDR_LEN; memcpy(p, &rose->source_digis[0], AX25_ADDR_LEN); p += AX25_ADDR_LEN; } /* For compatibility */ if (rose->dest_ndigis > 0) { *p++ = FAC_NATIONAL_DEST_DIGI; *p++ = AX25_ADDR_LEN; memcpy(p, &rose->dest_digis[0], AX25_ADDR_LEN); p += AX25_ADDR_LEN; } } *p++ = 0x00; *p++ = FAC_CCITT; *p++ = FAC_CCITT_DEST_NSAP; callsign = ax2asc(buf, &rose->dest_call); *p++ = strlen(callsign) + 10; *p++ = (strlen(callsign) + 9) * 2; /* ??? */ *p++ = 0x47; *p++ = 0x00; *p++ = 0x11; *p++ = ROSE_ADDR_LEN * 2; memcpy(p, &rose->dest_addr, ROSE_ADDR_LEN); p += ROSE_ADDR_LEN; memcpy(p, callsign, strlen(callsign)); p += strlen(callsign); *p++ = FAC_CCITT_SRC_NSAP; callsign = ax2asc(buf, &rose->source_call); *p++ = strlen(callsign) + 10; *p++ = (strlen(callsign) + 9) * 2; /* ??? */ *p++ = 0x47; *p++ = 0x00; *p++ = 0x11; *p++ = ROSE_ADDR_LEN * 2; memcpy(p, &rose->source_addr, ROSE_ADDR_LEN); p += ROSE_ADDR_LEN; memcpy(p, callsign, strlen(callsign)); p += strlen(callsign); len = p - buffer; buffer[0] = len - 1; return len; } void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic) { struct rose_sock *rose = rose_sk(sk); rose_stop_timer(sk); rose_stop_idletimer(sk); rose_clear_queues(sk); rose->lci = 0; rose->state = ROSE_STATE_0; if (cause != -1) rose->cause = cause; if (diagnostic != -1) rose->diagnostic = diagnostic; sk->sk_state = TCP_CLOSE; sk->sk_err = reason; sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_3547_4
crossvul-cpp_data_good_396_0
#include "cache.h" #include "object.h" #include "blob.h" #include "tree.h" #include "tree-walk.h" #include "commit.h" #include "tag.h" #include "fsck.h" #include "refs.h" #include "utf8.h" #include "sha1-array.h" #include "decorate.h" #include "oidset.h" #include "packfile.h" #include "submodule-config.h" #include "config.h" static struct oidset gitmodules_found = OIDSET_INIT; static struct oidset gitmodules_done = OIDSET_INIT; #define FSCK_FATAL -1 #define FSCK_INFO -2 #define FOREACH_MSG_ID(FUNC) \ /* fatal errors */ \ FUNC(NUL_IN_HEADER, FATAL) \ FUNC(UNTERMINATED_HEADER, FATAL) \ /* errors */ \ FUNC(BAD_DATE, ERROR) \ FUNC(BAD_DATE_OVERFLOW, ERROR) \ FUNC(BAD_EMAIL, ERROR) \ FUNC(BAD_NAME, ERROR) \ FUNC(BAD_OBJECT_SHA1, ERROR) \ FUNC(BAD_PARENT_SHA1, ERROR) \ FUNC(BAD_TAG_OBJECT, ERROR) \ FUNC(BAD_TIMEZONE, ERROR) \ FUNC(BAD_TREE, ERROR) \ FUNC(BAD_TREE_SHA1, ERROR) \ FUNC(BAD_TYPE, ERROR) \ FUNC(DUPLICATE_ENTRIES, ERROR) \ FUNC(MISSING_AUTHOR, ERROR) \ FUNC(MISSING_COMMITTER, ERROR) \ FUNC(MISSING_EMAIL, ERROR) \ FUNC(MISSING_GRAFT, ERROR) \ FUNC(MISSING_NAME_BEFORE_EMAIL, ERROR) \ FUNC(MISSING_OBJECT, ERROR) \ FUNC(MISSING_PARENT, ERROR) \ FUNC(MISSING_SPACE_BEFORE_DATE, ERROR) \ FUNC(MISSING_SPACE_BEFORE_EMAIL, ERROR) \ FUNC(MISSING_TAG, ERROR) \ FUNC(MISSING_TAG_ENTRY, ERROR) \ FUNC(MISSING_TAG_OBJECT, ERROR) \ FUNC(MISSING_TREE, ERROR) \ FUNC(MISSING_TREE_OBJECT, ERROR) \ FUNC(MISSING_TYPE, ERROR) \ FUNC(MISSING_TYPE_ENTRY, ERROR) \ FUNC(MULTIPLE_AUTHORS, ERROR) \ FUNC(TAG_OBJECT_NOT_TAG, ERROR) \ FUNC(TREE_NOT_SORTED, ERROR) \ FUNC(UNKNOWN_TYPE, ERROR) \ FUNC(ZERO_PADDED_DATE, ERROR) \ FUNC(GITMODULES_MISSING, ERROR) \ FUNC(GITMODULES_BLOB, ERROR) \ FUNC(GITMODULES_PARSE, ERROR) \ FUNC(GITMODULES_NAME, ERROR) \ FUNC(GITMODULES_SYMLINK, ERROR) \ FUNC(GITMODULES_URL, ERROR) \ FUNC(GITMODULES_PATH, ERROR) \ /* warnings */ \ FUNC(BAD_FILEMODE, WARN) \ FUNC(EMPTY_NAME, WARN) \ FUNC(FULL_PATHNAME, WARN) \ FUNC(HAS_DOT, WARN) \ FUNC(HAS_DOTDOT, WARN) \ 
FUNC(HAS_DOTGIT, WARN) \ FUNC(NULL_SHA1, WARN) \ FUNC(ZERO_PADDED_FILEMODE, WARN) \ FUNC(NUL_IN_COMMIT, WARN) \ /* infos (reported as warnings, but ignored by default) */ \ FUNC(BAD_TAG_NAME, INFO) \ FUNC(MISSING_TAGGER_ENTRY, INFO) #define MSG_ID(id, msg_type) FSCK_MSG_##id, enum fsck_msg_id { FOREACH_MSG_ID(MSG_ID) FSCK_MSG_MAX }; #undef MSG_ID #define STR(x) #x #define MSG_ID(id, msg_type) { STR(id), NULL, FSCK_##msg_type }, static struct { const char *id_string; const char *downcased; int msg_type; } msg_id_info[FSCK_MSG_MAX + 1] = { FOREACH_MSG_ID(MSG_ID) { NULL, NULL, -1 } }; #undef MSG_ID static int parse_msg_id(const char *text) { int i; if (!msg_id_info[0].downcased) { /* convert id_string to lower case, without underscores. */ for (i = 0; i < FSCK_MSG_MAX; i++) { const char *p = msg_id_info[i].id_string; int len = strlen(p); char *q = xmalloc(len); msg_id_info[i].downcased = q; while (*p) if (*p == '_') p++; else *(q)++ = tolower(*(p)++); *q = '\0'; } } for (i = 0; i < FSCK_MSG_MAX; i++) if (!strcmp(text, msg_id_info[i].downcased)) return i; return -1; } static int fsck_msg_type(enum fsck_msg_id msg_id, struct fsck_options *options) { int msg_type; assert(msg_id >= 0 && msg_id < FSCK_MSG_MAX); if (options->msg_type) msg_type = options->msg_type[msg_id]; else { msg_type = msg_id_info[msg_id].msg_type; if (options->strict && msg_type == FSCK_WARN) msg_type = FSCK_ERROR; } return msg_type; } static void init_skiplist(struct fsck_options *options, const char *path) { static struct oid_array skiplist = OID_ARRAY_INIT; int sorted, fd; char buffer[GIT_MAX_HEXSZ + 1]; struct object_id oid; if (options->skiplist) sorted = options->skiplist->sorted; else { sorted = 1; options->skiplist = &skiplist; } fd = open(path, O_RDONLY); if (fd < 0) die("Could not open skip list: %s", path); for (;;) { const char *p; int result = read_in_full(fd, buffer, sizeof(buffer)); if (result < 0) die_errno("Could not read '%s'", path); if (!result) break; if (parse_oid_hex(buffer, 
&oid, &p) || *p != '\n') die("Invalid SHA-1: %s", buffer); oid_array_append(&skiplist, &oid); if (sorted && skiplist.nr > 1 && oidcmp(&skiplist.oid[skiplist.nr - 2], &oid) > 0) sorted = 0; } close(fd); if (sorted) skiplist.sorted = 1; } static int parse_msg_type(const char *str) { if (!strcmp(str, "error")) return FSCK_ERROR; else if (!strcmp(str, "warn")) return FSCK_WARN; else if (!strcmp(str, "ignore")) return FSCK_IGNORE; else die("Unknown fsck message type: '%s'", str); } int is_valid_msg_type(const char *msg_id, const char *msg_type) { if (parse_msg_id(msg_id) < 0) return 0; parse_msg_type(msg_type); return 1; } void fsck_set_msg_type(struct fsck_options *options, const char *msg_id, const char *msg_type) { int id = parse_msg_id(msg_id), type; if (id < 0) die("Unhandled message id: %s", msg_id); type = parse_msg_type(msg_type); if (type != FSCK_ERROR && msg_id_info[id].msg_type == FSCK_FATAL) die("Cannot demote %s to %s", msg_id, msg_type); if (!options->msg_type) { int i; int *msg_type; ALLOC_ARRAY(msg_type, FSCK_MSG_MAX); for (i = 0; i < FSCK_MSG_MAX; i++) msg_type[i] = fsck_msg_type(i, options); options->msg_type = msg_type; } options->msg_type[id] = type; } void fsck_set_msg_types(struct fsck_options *options, const char *values) { char *buf = xstrdup(values), *to_free = buf; int done = 0; while (!done) { int len = strcspn(buf, " ,|"), equal; done = !buf[len]; if (!len) { buf++; continue; } buf[len] = '\0'; for (equal = 0; equal < len && buf[equal] != '=' && buf[equal] != ':'; equal++) buf[equal] = tolower(buf[equal]); buf[equal] = '\0'; if (!strcmp(buf, "skiplist")) { if (equal == len) die("skiplist requires a path"); init_skiplist(options, buf + equal + 1); buf += len + 1; continue; } if (equal == len) die("Missing '=': '%s'", buf); fsck_set_msg_type(options, buf, buf + equal + 1); buf += len + 1; } free(to_free); } static void append_msg_id(struct strbuf *sb, const char *msg_id) { for (;;) { char c = *(msg_id)++; if (!c) break; if (c != '_') 
strbuf_addch(sb, tolower(c)); else { assert(*msg_id); strbuf_addch(sb, *(msg_id)++); } } strbuf_addstr(sb, ": "); } __attribute__((format (printf, 4, 5))) static int report(struct fsck_options *options, struct object *object, enum fsck_msg_id id, const char *fmt, ...) { va_list ap; struct strbuf sb = STRBUF_INIT; int msg_type = fsck_msg_type(id, options), result; if (msg_type == FSCK_IGNORE) return 0; if (options->skiplist && object && oid_array_lookup(options->skiplist, &object->oid) >= 0) return 0; if (msg_type == FSCK_FATAL) msg_type = FSCK_ERROR; else if (msg_type == FSCK_INFO) msg_type = FSCK_WARN; append_msg_id(&sb, msg_id_info[id].id_string); va_start(ap, fmt); strbuf_vaddf(&sb, fmt, ap); result = options->error_func(options, object, msg_type, sb.buf); strbuf_release(&sb); va_end(ap); return result; } static char *get_object_name(struct fsck_options *options, struct object *obj) { if (!options->object_names) return NULL; return lookup_decoration(options->object_names, obj); } static void put_object_name(struct fsck_options *options, struct object *obj, const char *fmt, ...) 
{ va_list ap; struct strbuf buf = STRBUF_INIT; char *existing; if (!options->object_names) return; existing = lookup_decoration(options->object_names, obj); if (existing) return; va_start(ap, fmt); strbuf_vaddf(&buf, fmt, ap); add_decoration(options->object_names, obj, strbuf_detach(&buf, NULL)); va_end(ap); } static const char *describe_object(struct fsck_options *o, struct object *obj) { static struct strbuf buf = STRBUF_INIT; char *name; strbuf_reset(&buf); strbuf_addstr(&buf, oid_to_hex(&obj->oid)); if (o->object_names && (name = lookup_decoration(o->object_names, obj))) strbuf_addf(&buf, " (%s)", name); return buf.buf; } static int fsck_walk_tree(struct tree *tree, void *data, struct fsck_options *options) { struct tree_desc desc; struct name_entry entry; int res = 0; const char *name; if (parse_tree(tree)) return -1; name = get_object_name(options, &tree->object); if (init_tree_desc_gently(&desc, tree->buffer, tree->size)) return -1; while (tree_entry_gently(&desc, &entry)) { struct object *obj; int result; if (S_ISGITLINK(entry.mode)) continue; if (S_ISDIR(entry.mode)) { obj = (struct object *)lookup_tree(entry.oid); if (name && obj) put_object_name(options, obj, "%s%s/", name, entry.path); result = options->walk(obj, OBJ_TREE, data, options); } else if (S_ISREG(entry.mode) || S_ISLNK(entry.mode)) { obj = (struct object *)lookup_blob(entry.oid); if (name && obj) put_object_name(options, obj, "%s%s", name, entry.path); result = options->walk(obj, OBJ_BLOB, data, options); } else { result = error("in tree %s: entry %s has bad mode %.6o", describe_object(options, &tree->object), entry.path, entry.mode); } if (result < 0) return result; if (!res) res = result; } return res; } static int fsck_walk_commit(struct commit *commit, void *data, struct fsck_options *options) { int counter = 0, generation = 0, name_prefix_len = 0; struct commit_list *parents; int res; int result; const char *name; if (parse_commit(commit)) return -1; name = get_object_name(options, 
&commit->object); if (name) put_object_name(options, &commit->tree->object, "%s:", name); result = options->walk((struct object *)commit->tree, OBJ_TREE, data, options); if (result < 0) return result; res = result; parents = commit->parents; if (name && parents) { int len = strlen(name), power; if (len && name[len - 1] == '^') { generation = 1; name_prefix_len = len - 1; } else { /* parse ~<generation> suffix */ for (generation = 0, power = 1; len && isdigit(name[len - 1]); power *= 10) generation += power * (name[--len] - '0'); if (power > 1 && len && name[len - 1] == '~') name_prefix_len = len - 1; } } while (parents) { if (name) { struct object *obj = &parents->item->object; if (++counter > 1) put_object_name(options, obj, "%s^%d", name, counter); else if (generation > 0) put_object_name(options, obj, "%.*s~%d", name_prefix_len, name, generation + 1); else put_object_name(options, obj, "%s^", name); } result = options->walk((struct object *)parents->item, OBJ_COMMIT, data, options); if (result < 0) return result; if (!res) res = result; parents = parents->next; } return res; } static int fsck_walk_tag(struct tag *tag, void *data, struct fsck_options *options) { char *name = get_object_name(options, &tag->object); if (parse_tag(tag)) return -1; if (name) put_object_name(options, tag->tagged, "%s", name); return options->walk(tag->tagged, OBJ_ANY, data, options); } int fsck_walk(struct object *obj, void *data, struct fsck_options *options) { if (!obj) return -1; if (obj->type == OBJ_NONE) parse_object(&obj->oid); switch (obj->type) { case OBJ_BLOB: return 0; case OBJ_TREE: return fsck_walk_tree((struct tree *)obj, data, options); case OBJ_COMMIT: return fsck_walk_commit((struct commit *)obj, data, options); case OBJ_TAG: return fsck_walk_tag((struct tag *)obj, data, options); default: error("Unknown object type for %s", describe_object(options, obj)); return -1; } } /* * The entries in a tree are ordered in the _path_ order, * which means that a directory entry is 
ordered by adding * a slash to the end of it. * * So a directory called "a" is ordered _after_ a file * called "a.c", because "a/" sorts after "a.c". */ #define TREE_UNORDERED (-1) #define TREE_HAS_DUPS (-2) static int verify_ordered(unsigned mode1, const char *name1, unsigned mode2, const char *name2) { int len1 = strlen(name1); int len2 = strlen(name2); int len = len1 < len2 ? len1 : len2; unsigned char c1, c2; int cmp; cmp = memcmp(name1, name2, len); if (cmp < 0) return 0; if (cmp > 0) return TREE_UNORDERED; /* * Ok, the first <len> characters are the same. * Now we need to order the next one, but turn * a '\0' into a '/' for a directory entry. */ c1 = name1[len]; c2 = name2[len]; if (!c1 && !c2) /* * git-write-tree used to write out a nonsense tree that has * entries with the same name, one blob and one tree. Make * sure we do not have duplicate entries. */ return TREE_HAS_DUPS; if (!c1 && S_ISDIR(mode1)) c1 = '/'; if (!c2 && S_ISDIR(mode2)) c2 = '/'; return c1 < c2 ? 0 : TREE_UNORDERED; } static int fsck_tree(struct tree *item, struct fsck_options *options) { int retval = 0; int has_null_sha1 = 0; int has_full_path = 0; int has_empty_name = 0; int has_dot = 0; int has_dotdot = 0; int has_dotgit = 0; int has_zero_pad = 0; int has_bad_modes = 0; int has_dup_entries = 0; int not_properly_sorted = 0; struct tree_desc desc; unsigned o_mode; const char *o_name; if (init_tree_desc_gently(&desc, item->buffer, item->size)) { retval += report(options, &item->object, FSCK_MSG_BAD_TREE, "cannot be parsed as a tree"); return retval; } o_mode = 0; o_name = NULL; while (desc.size) { unsigned mode; const char *name; const struct object_id *oid; oid = tree_entry_extract(&desc, &name, &mode); has_null_sha1 |= is_null_oid(oid); has_full_path |= !!strchr(name, '/'); has_empty_name |= !*name; has_dot |= !strcmp(name, "."); has_dotdot |= !strcmp(name, ".."); has_dotgit |= is_hfs_dotgit(name) || is_ntfs_dotgit(name); has_zero_pad |= *(char *)desc.buffer == '0'; if 
(is_hfs_dotgitmodules(name) || is_ntfs_dotgitmodules(name)) { if (!S_ISLNK(mode)) oidset_insert(&gitmodules_found, oid); else retval += report(options, &item->object, FSCK_MSG_GITMODULES_SYMLINK, ".gitmodules is a symbolic link"); } if (update_tree_entry_gently(&desc)) { retval += report(options, &item->object, FSCK_MSG_BAD_TREE, "cannot be parsed as a tree"); break; } switch (mode) { /* * Standard modes.. */ case S_IFREG | 0755: case S_IFREG | 0644: case S_IFLNK: case S_IFDIR: case S_IFGITLINK: break; /* * This is nonstandard, but we had a few of these * early on when we honored the full set of mode * bits.. */ case S_IFREG | 0664: if (!options->strict) break; /* fallthrough */ default: has_bad_modes = 1; } if (o_name) { switch (verify_ordered(o_mode, o_name, mode, name)) { case TREE_UNORDERED: not_properly_sorted = 1; break; case TREE_HAS_DUPS: has_dup_entries = 1; break; default: break; } } o_mode = mode; o_name = name; } if (has_null_sha1) retval += report(options, &item->object, FSCK_MSG_NULL_SHA1, "contains entries pointing to null sha1"); if (has_full_path) retval += report(options, &item->object, FSCK_MSG_FULL_PATHNAME, "contains full pathnames"); if (has_empty_name) retval += report(options, &item->object, FSCK_MSG_EMPTY_NAME, "contains empty pathname"); if (has_dot) retval += report(options, &item->object, FSCK_MSG_HAS_DOT, "contains '.'"); if (has_dotdot) retval += report(options, &item->object, FSCK_MSG_HAS_DOTDOT, "contains '..'"); if (has_dotgit) retval += report(options, &item->object, FSCK_MSG_HAS_DOTGIT, "contains '.git'"); if (has_zero_pad) retval += report(options, &item->object, FSCK_MSG_ZERO_PADDED_FILEMODE, "contains zero-padded file modes"); if (has_bad_modes) retval += report(options, &item->object, FSCK_MSG_BAD_FILEMODE, "contains bad file modes"); if (has_dup_entries) retval += report(options, &item->object, FSCK_MSG_DUPLICATE_ENTRIES, "contains duplicate file entries"); if (not_properly_sorted) retval += report(options, &item->object, 
FSCK_MSG_TREE_NOT_SORTED, "not properly sorted"); return retval; } static int verify_headers(const void *data, unsigned long size, struct object *obj, struct fsck_options *options) { const char *buffer = (const char *)data; unsigned long i; for (i = 0; i < size; i++) { switch (buffer[i]) { case '\0': return report(options, obj, FSCK_MSG_NUL_IN_HEADER, "unterminated header: NUL at offset %ld", i); case '\n': if (i + 1 < size && buffer[i + 1] == '\n') return 0; } } /* * We did not find double-LF that separates the header * and the body. Not having a body is not a crime but * we do want to see the terminating LF for the last header * line. */ if (size && buffer[size - 1] == '\n') return 0; return report(options, obj, FSCK_MSG_UNTERMINATED_HEADER, "unterminated header"); } static int fsck_ident(const char **ident, struct object *obj, struct fsck_options *options) { const char *p = *ident; char *end; *ident = strchrnul(*ident, '\n'); if (**ident == '\n') (*ident)++; if (*p == '<') return report(options, obj, FSCK_MSG_MISSING_NAME_BEFORE_EMAIL, "invalid author/committer line - missing space before email"); p += strcspn(p, "<>\n"); if (*p == '>') return report(options, obj, FSCK_MSG_BAD_NAME, "invalid author/committer line - bad name"); if (*p != '<') return report(options, obj, FSCK_MSG_MISSING_EMAIL, "invalid author/committer line - missing email"); if (p[-1] != ' ') return report(options, obj, FSCK_MSG_MISSING_SPACE_BEFORE_EMAIL, "invalid author/committer line - missing space before email"); p++; p += strcspn(p, "<>\n"); if (*p != '>') return report(options, obj, FSCK_MSG_BAD_EMAIL, "invalid author/committer line - bad email"); p++; if (*p != ' ') return report(options, obj, FSCK_MSG_MISSING_SPACE_BEFORE_DATE, "invalid author/committer line - missing space before date"); p++; if (*p == '0' && p[1] != ' ') return report(options, obj, FSCK_MSG_ZERO_PADDED_DATE, "invalid author/committer line - zero-padded date"); if (date_overflows(parse_timestamp(p, &end, 10))) return 
report(options, obj, FSCK_MSG_BAD_DATE_OVERFLOW, "invalid author/committer line - date causes integer overflow"); if ((end == p || *end != ' ')) return report(options, obj, FSCK_MSG_BAD_DATE, "invalid author/committer line - bad date"); p = end + 1; if ((*p != '+' && *p != '-') || !isdigit(p[1]) || !isdigit(p[2]) || !isdigit(p[3]) || !isdigit(p[4]) || (p[5] != '\n')) return report(options, obj, FSCK_MSG_BAD_TIMEZONE, "invalid author/committer line - bad time zone"); p += 6; return 0; } static int fsck_commit_buffer(struct commit *commit, const char *buffer, unsigned long size, struct fsck_options *options) { unsigned char tree_sha1[20], sha1[20]; struct commit_graft *graft; unsigned parent_count, parent_line_count = 0, author_count; int err; const char *buffer_begin = buffer; if (verify_headers(buffer, size, &commit->object, options)) return -1; if (!skip_prefix(buffer, "tree ", &buffer)) return report(options, &commit->object, FSCK_MSG_MISSING_TREE, "invalid format - expected 'tree' line"); if (get_sha1_hex(buffer, tree_sha1) || buffer[40] != '\n') { err = report(options, &commit->object, FSCK_MSG_BAD_TREE_SHA1, "invalid 'tree' line format - bad sha1"); if (err) return err; } buffer += 41; while (skip_prefix(buffer, "parent ", &buffer)) { if (get_sha1_hex(buffer, sha1) || buffer[40] != '\n') { err = report(options, &commit->object, FSCK_MSG_BAD_PARENT_SHA1, "invalid 'parent' line format - bad sha1"); if (err) return err; } buffer += 41; parent_line_count++; } graft = lookup_commit_graft(&commit->object.oid); parent_count = commit_list_count(commit->parents); if (graft) { if (graft->nr_parent == -1 && !parent_count) ; /* shallow commit */ else if (graft->nr_parent != parent_count) { err = report(options, &commit->object, FSCK_MSG_MISSING_GRAFT, "graft objects missing"); if (err) return err; } } else { if (parent_count != parent_line_count) { err = report(options, &commit->object, FSCK_MSG_MISSING_PARENT, "parent objects missing"); if (err) return err; } } 
author_count = 0; while (skip_prefix(buffer, "author ", &buffer)) { author_count++; err = fsck_ident(&buffer, &commit->object, options); if (err) return err; } if (author_count < 1) err = report(options, &commit->object, FSCK_MSG_MISSING_AUTHOR, "invalid format - expected 'author' line"); else if (author_count > 1) err = report(options, &commit->object, FSCK_MSG_MULTIPLE_AUTHORS, "invalid format - multiple 'author' lines"); if (err) return err; if (!skip_prefix(buffer, "committer ", &buffer)) return report(options, &commit->object, FSCK_MSG_MISSING_COMMITTER, "invalid format - expected 'committer' line"); err = fsck_ident(&buffer, &commit->object, options); if (err) return err; if (!commit->tree) { err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", sha1_to_hex(tree_sha1)); if (err) return err; } if (memchr(buffer_begin, '\0', size)) { err = report(options, &commit->object, FSCK_MSG_NUL_IN_COMMIT, "NUL byte in the commit object body"); if (err) return err; } return 0; } static int fsck_commit(struct commit *commit, const char *data, unsigned long size, struct fsck_options *options) { const char *buffer = data ? 
data : get_commit_buffer(commit, &size); int ret = fsck_commit_buffer(commit, buffer, size, options); if (!data) unuse_commit_buffer(commit, buffer); return ret; } static int fsck_tag_buffer(struct tag *tag, const char *data, unsigned long size, struct fsck_options *options) { unsigned char sha1[20]; int ret = 0; const char *buffer; char *to_free = NULL, *eol; struct strbuf sb = STRBUF_INIT; if (data) buffer = data; else { enum object_type type; buffer = to_free = read_sha1_file(tag->object.oid.hash, &type, &size); if (!buffer) return report(options, &tag->object, FSCK_MSG_MISSING_TAG_OBJECT, "cannot read tag object"); if (type != OBJ_TAG) { ret = report(options, &tag->object, FSCK_MSG_TAG_OBJECT_NOT_TAG, "expected tag got %s", type_name(type)); goto done; } } ret = verify_headers(buffer, size, &tag->object, options); if (ret) goto done; if (!skip_prefix(buffer, "object ", &buffer)) { ret = report(options, &tag->object, FSCK_MSG_MISSING_OBJECT, "invalid format - expected 'object' line"); goto done; } if (get_sha1_hex(buffer, sha1) || buffer[40] != '\n') { ret = report(options, &tag->object, FSCK_MSG_BAD_OBJECT_SHA1, "invalid 'object' line format - bad sha1"); if (ret) goto done; } buffer += 41; if (!skip_prefix(buffer, "type ", &buffer)) { ret = report(options, &tag->object, FSCK_MSG_MISSING_TYPE_ENTRY, "invalid format - expected 'type' line"); goto done; } eol = strchr(buffer, '\n'); if (!eol) { ret = report(options, &tag->object, FSCK_MSG_MISSING_TYPE, "invalid format - unexpected end after 'type' line"); goto done; } if (type_from_string_gently(buffer, eol - buffer, 1) < 0) ret = report(options, &tag->object, FSCK_MSG_BAD_TYPE, "invalid 'type' value"); if (ret) goto done; buffer = eol + 1; if (!skip_prefix(buffer, "tag ", &buffer)) { ret = report(options, &tag->object, FSCK_MSG_MISSING_TAG_ENTRY, "invalid format - expected 'tag' line"); goto done; } eol = strchr(buffer, '\n'); if (!eol) { ret = report(options, &tag->object, FSCK_MSG_MISSING_TAG, "invalid format 
- unexpected end after 'type' line"); goto done; } strbuf_addf(&sb, "refs/tags/%.*s", (int)(eol - buffer), buffer); if (check_refname_format(sb.buf, 0)) { ret = report(options, &tag->object, FSCK_MSG_BAD_TAG_NAME, "invalid 'tag' name: %.*s", (int)(eol - buffer), buffer); if (ret) goto done; } buffer = eol + 1; if (!skip_prefix(buffer, "tagger ", &buffer)) { /* early tags do not contain 'tagger' lines; warn only */ ret = report(options, &tag->object, FSCK_MSG_MISSING_TAGGER_ENTRY, "invalid format - expected 'tagger' line"); if (ret) goto done; } else ret = fsck_ident(&buffer, &tag->object, options); done: strbuf_release(&sb); free(to_free); return ret; } static int fsck_tag(struct tag *tag, const char *data, unsigned long size, struct fsck_options *options) { struct object *tagged = tag->tagged; if (!tagged) return report(options, &tag->object, FSCK_MSG_BAD_TAG_OBJECT, "could not load tagged object"); return fsck_tag_buffer(tag, data, size, options); } struct fsck_gitmodules_data { struct object *obj; struct fsck_options *options; int ret; }; static int fsck_gitmodules_fn(const char *var, const char *value, void *vdata) { struct fsck_gitmodules_data *data = vdata; const char *subsection, *key; int subsection_len; char *name; if (parse_config_key(var, "submodule", &subsection, &subsection_len, &key) < 0 || !subsection) return 0; name = xmemdupz(subsection, subsection_len); if (check_submodule_name(name) < 0) data->ret |= report(data->options, data->obj, FSCK_MSG_GITMODULES_NAME, "disallowed submodule name: %s", name); if (!strcmp(key, "url") && value && looks_like_command_line_option(value)) data->ret |= report(data->options, data->obj, FSCK_MSG_GITMODULES_URL, "disallowed submodule url: %s", value); if (!strcmp(key, "path") && value && looks_like_command_line_option(value)) data->ret |= report(data->options, data->obj, FSCK_MSG_GITMODULES_PATH, "disallowed submodule path: %s", value); free(name); return 0; } static int fsck_blob(struct blob *blob, const char *buf, 
unsigned long size, struct fsck_options *options) { struct fsck_gitmodules_data data; if (!oidset_contains(&gitmodules_found, &blob->object.oid)) return 0; oidset_insert(&gitmodules_done, &blob->object.oid); if (!buf) { /* * A missing buffer here is a sign that the caller found the * blob too gigantic to load into memory. Let's just consider * that an error. */ return report(options, &blob->object, FSCK_MSG_GITMODULES_PARSE, ".gitmodules too large to parse"); } data.obj = &blob->object; data.options = options; data.ret = 0; if (git_config_from_mem(fsck_gitmodules_fn, CONFIG_ORIGIN_BLOB, ".gitmodules", buf, size, &data)) data.ret |= report(options, &blob->object, FSCK_MSG_GITMODULES_PARSE, "could not parse gitmodules blob"); return data.ret; } int fsck_object(struct object *obj, void *data, unsigned long size, struct fsck_options *options) { if (!obj) return report(options, obj, FSCK_MSG_BAD_OBJECT_SHA1, "no valid object to fsck"); if (obj->type == OBJ_BLOB) return fsck_blob((struct blob *)obj, data, size, options); if (obj->type == OBJ_TREE) return fsck_tree((struct tree *) obj, options); if (obj->type == OBJ_COMMIT) return fsck_commit((struct commit *) obj, (const char *) data, size, options); if (obj->type == OBJ_TAG) return fsck_tag((struct tag *) obj, (const char *) data, size, options); return report(options, obj, FSCK_MSG_UNKNOWN_TYPE, "unknown type '%d' (internal fsck error)", obj->type); } int fsck_error_function(struct fsck_options *o, struct object *obj, int msg_type, const char *message) { if (msg_type == FSCK_WARN) { warning("object %s: %s", describe_object(o, obj), message); return 0; } error("object %s: %s", describe_object(o, obj), message); return 1; } int fsck_finish(struct fsck_options *options) { int ret = 0; struct oidset_iter iter; const struct object_id *oid; oidset_iter_init(&gitmodules_found, &iter); while ((oid = oidset_iter_next(&iter))) { struct blob *blob; enum object_type type; unsigned long size; char *buf; if 
(oidset_contains(&gitmodules_done, oid)) continue; blob = lookup_blob(oid); if (!blob) { ret |= report(options, &blob->object, FSCK_MSG_GITMODULES_BLOB, "non-blob found at .gitmodules"); continue; } buf = read_sha1_file(oid->hash, &type, &size); if (!buf) { if (is_promisor_object(&blob->object.oid)) continue; ret |= report(options, &blob->object, FSCK_MSG_GITMODULES_MISSING, "unable to read .gitmodules blob"); continue; } if (type == OBJ_BLOB) ret |= fsck_blob(blob, buf, size, options); else ret |= report(options, &blob->object, FSCK_MSG_GITMODULES_BLOB, "non-blob found at .gitmodules"); free(buf); } oidset_clear(&gitmodules_found); oidset_clear(&gitmodules_done); return ret; }
./CrossVul/dataset_final_sorted/CWE-20/c/good_396_0
crossvul-cpp_data_bad_1657_1
/* A Bison parser, made by GNU Bison 2.4.1. */ /* Skeleton implementation for Bison's Yacc-like parsers in C Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice, so long as that work isn't itself a parser generator using the skeleton or a modified version thereof as a parser skeleton. Alternatively, if you modify or redistribute the parser skeleton itself, you may (at your option) remove this special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. There are some unavoidable exceptions within include files to define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. */ /* Identify Bison output. 
*/ #define YYBISON 1 /* Bison version. */ #define YYBISON_VERSION "2.4.1" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" /* Pure parsers. */ #define YYPURE 0 /* Push parsers. */ #define YYPUSH 0 /* Pull parsers. */ #define YYPULL 1 /* Using locations. */ #define YYLSP_NEEDED 0 /* Copy the first part of user declarations. */ /* Line 189 of yacc.c */ #line 11 "ntp_parser.y" #ifdef HAVE_CONFIG_H # include <config.h> #endif #include "ntpd.h" #include "ntp_machine.h" #include "ntp.h" #include "ntp_stdlib.h" #include "ntp_filegen.h" #include "ntp_data_structures.h" #include "ntp_scanner.h" #include "ntp_config.h" #include "ntp_crypto.h" #include "ntpsim.h" /* HMS: Do we really want this all the time? */ /* SK: It might be a good idea to always include the simulator code. That way someone can use the same configuration file for both the simulator and the daemon */ struct FILE_INFO *ip_file; /* Pointer to the configuration file stream */ #define YYMALLOC emalloc #define YYFREE free #define YYERROR_VERBOSE #define YYMAXDEPTH 1000 /* stop the madness sooner */ void yyerror (char *msg); extern int input_from_file; /* 0=input from ntpq :config */ /* Line 189 of yacc.c */ #line 107 "ntp_parser.c" /* Enabling traces. */ #ifndef YYDEBUG # define YYDEBUG 1 #endif /* Enabling verbose error messages. */ #ifdef YYERROR_VERBOSE # undef YYERROR_VERBOSE # define YYERROR_VERBOSE 1 #else # define YYERROR_VERBOSE 0 #endif /* Enabling the token table. */ #ifndef YYTOKEN_TABLE # define YYTOKEN_TABLE 1 #endif /* Tokens. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE /* Put the tokens into the symbol table, so that GDB and other debuggers know about them. 
*/
   /* NOTE(review): generated token list — token codes 258..431 are assigned
      sequentially by Bison and mirrored by the #define block below; keep the
      two in sync by regenerating, never by hand-editing.  */
   enum yytokentype {
     T_Age = 258,
     T_All = 259,
     T_Allan = 260,
     T_Auth = 261,
     T_Autokey = 262,
     T_Automax = 263,
     T_Average = 264,
     T_Bclient = 265,
     T_Beacon = 266,
     T_Bias = 267,
     T_Broadcast = 268,
     T_Broadcastclient = 269,
     T_Broadcastdelay = 270,
     T_Burst = 271,
     T_Calibrate = 272,
     T_Calldelay = 273,
     T_Ceiling = 274,
     T_Clockstats = 275,
     T_Cohort = 276,
     T_ControlKey = 277,
     T_Crypto = 278,
     T_Cryptostats = 279,
     T_Day = 280,
     T_Default = 281,
     T_Digest = 282,
     T_Disable = 283,
     T_Discard = 284,
     T_Dispersion = 285,
     T_Double = 286,
     T_Driftfile = 287,
     T_Drop = 288,
     T_Ellipsis = 289,
     T_Enable = 290,
     T_End = 291,
     T_False = 292,
     T_File = 293,
     T_Filegen = 294,
     T_Flag1 = 295,
     T_Flag2 = 296,
     T_Flag3 = 297,
     T_Flag4 = 298,
     T_Flake = 299,
     T_Floor = 300,
     T_Freq = 301,
     T_Fudge = 302,
     T_Host = 303,
     T_Huffpuff = 304,
     T_Iburst = 305,
     T_Ident = 306,
     T_Ignore = 307,
     T_Incalloc = 308,
     T_Incmem = 309,
     T_Initalloc = 310,
     T_Initmem = 311,
     T_Includefile = 312,
     T_Integer = 313,
     T_Interface = 314,
     T_Ipv4 = 315,
     T_Ipv4_flag = 316,
     T_Ipv6 = 317,
     T_Ipv6_flag = 318,
     T_Kernel = 319,
     T_Key = 320,
     T_Keys = 321,
     T_Keysdir = 322,
     T_Kod = 323,
     T_Mssntp = 324,
     T_Leapfile = 325,
     T_Limited = 326,
     T_Link = 327,
     T_Listen = 328,
     T_Logconfig = 329,
     T_Logfile = 330,
     T_Loopstats = 331,
     T_Lowpriotrap = 332,
     T_Manycastclient = 333,
     T_Manycastserver = 334,
     T_Mask = 335,
     T_Maxage = 336,
     T_Maxclock = 337,
     T_Maxdepth = 338,
     T_Maxdist = 339,
     T_Maxmem = 340,
     T_Maxpoll = 341,
     T_Minclock = 342,
     T_Mindepth = 343,
     T_Mindist = 344,
     T_Minimum = 345,
     T_Minpoll = 346,
     T_Minsane = 347,
     T_Mode = 348,
     T_Monitor = 349,
     T_Month = 350,
     T_Mru = 351,
     T_Multicastclient = 352,
     T_Nic = 353,
     T_Nolink = 354,
     T_Nomodify = 355,
     T_None = 356,
     T_Nopeer = 357,
     T_Noquery = 358,
     T_Noselect = 359,
     T_Noserve = 360,
     T_Notrap = 361,
     T_Notrust = 362,
     T_Ntp = 363,
     T_Ntpport = 364,
     T_NtpSignDsocket = 365,
     T_Orphan = 366,
     T_Orphanwait = 367,
     T_Panic = 368,
     T_Peer = 369,
     T_Peerstats = 370,
     T_Phone = 371,
     T_Pid = 372,
     T_Pidfile = 373,
     T_Pool = 374,
     T_Port = 375,
     T_Preempt = 376,
     T_Prefer = 377,
     T_Protostats = 378,
     T_Pw = 379,
     T_Qos = 380,
     T_Randfile = 381,
     T_Rawstats = 382,
     T_Refid = 383,
     T_Requestkey = 384,
     T_Restrict = 385,
     T_Revoke = 386,
     T_Saveconfigdir = 387,
     T_Server = 388,
     T_Setvar = 389,
     T_Sign = 390,
     T_Source = 391,
     T_Statistics = 392,
     T_Stats = 393,
     T_Statsdir = 394,
     T_Step = 395,
     T_Stepout = 396,
     T_Stratum = 397,
     T_String = 398,
     T_Sysstats = 399,
     T_Tick = 400,
     T_Time1 = 401,
     T_Time2 = 402,
     T_Timingstats = 403,
     T_Tinker = 404,
     T_Tos = 405,
     T_Trap = 406,
     T_True = 407,
     T_Trustedkey = 408,
     T_Ttl = 409,
     T_Type = 410,
     T_Unconfig = 411,
     T_Unpeer = 412,
     T_Version = 413,
     T_WanderThreshold = 414,
     T_Week = 415,
     T_Wildcard = 416,
     T_Xleave = 417,
     T_Year = 418,
     T_Flag = 419,
     T_Void = 420,
     T_EOC = 421,
     T_Simulate = 422,
     T_Beep_Delay = 423,
     T_Sim_Duration = 424,
     T_Server_Offset = 425,
     T_Duration = 426,
     T_Freq_Offset = 427,
     T_Wander = 428,
     T_Jitter = 429,
     T_Prop_Delay = 430,
     T_Proc_Delay = 431
   };
#endif

/* Tokens.  */
#define T_Age 258
#define T_All 259
#define T_Allan 260
#define T_Auth 261
#define T_Autokey 262
#define T_Automax 263
#define T_Average 264
#define T_Bclient 265
#define T_Beacon 266
#define T_Bias 267
#define T_Broadcast 268
#define T_Broadcastclient 269
#define T_Broadcastdelay 270
#define T_Burst 271
#define T_Calibrate 272
#define T_Calldelay 273
#define T_Ceiling 274
#define T_Clockstats 275
#define T_Cohort 276
#define T_ControlKey 277
#define T_Crypto 278
#define T_Cryptostats 279
#define T_Day 280
#define T_Default 281
#define T_Digest 282
#define T_Disable 283
#define T_Discard 284
#define T_Dispersion 285
#define T_Double 286
#define T_Driftfile 287
#define T_Drop 288
#define T_Ellipsis 289
#define T_Enable 290
#define T_End 291
#define T_False 292
#define T_File 293
#define T_Filegen 294
#define T_Flag1 295
#define T_Flag2 296
#define T_Flag3 297
#define T_Flag4 298
#define T_Flake 299
#define T_Floor 300
#define T_Freq 301
#define T_Fudge 302
#define T_Host 303
#define T_Huffpuff 304
#define T_Iburst 305
#define T_Ident 306
#define T_Ignore 307
#define T_Incalloc 308
#define T_Incmem 309
#define T_Initalloc 310
#define T_Initmem 311
#define T_Includefile 312
#define T_Integer 313
#define T_Interface 314
#define T_Ipv4 315
#define T_Ipv4_flag 316
#define T_Ipv6 317
#define T_Ipv6_flag 318
#define T_Kernel 319
#define T_Key 320
#define T_Keys 321
#define T_Keysdir 322
#define T_Kod 323
#define T_Mssntp 324
#define T_Leapfile 325
#define T_Limited 326
#define T_Link 327
#define T_Listen 328
#define T_Logconfig 329
#define T_Logfile 330
#define T_Loopstats 331
#define T_Lowpriotrap 332
#define T_Manycastclient 333
#define T_Manycastserver 334
#define T_Mask 335
#define T_Maxage 336
#define T_Maxclock 337
#define T_Maxdepth 338
#define T_Maxdist 339
#define T_Maxmem 340
#define T_Maxpoll 341
#define T_Minclock 342
#define T_Mindepth 343
#define T_Mindist 344
#define T_Minimum 345
#define T_Minpoll 346
#define T_Minsane 347
#define T_Mode 348
#define T_Monitor 349
#define T_Month 350
#define T_Mru 351
#define T_Multicastclient 352
#define T_Nic 353
#define T_Nolink 354
#define T_Nomodify 355
#define T_None 356
#define T_Nopeer 357
#define T_Noquery 358
#define T_Noselect 359
#define T_Noserve 360
#define T_Notrap 361
#define T_Notrust 362
#define T_Ntp 363
#define T_Ntpport 364
#define T_NtpSignDsocket 365
#define T_Orphan 366
#define T_Orphanwait 367
#define T_Panic 368
#define T_Peer 369
#define T_Peerstats 370
#define T_Phone 371
#define T_Pid 372
#define T_Pidfile 373
#define T_Pool 374
#define T_Port 375
#define T_Preempt 376
#define T_Prefer 377
#define T_Protostats 378
#define T_Pw 379
#define T_Qos 380
#define T_Randfile 381
#define T_Rawstats 382
#define T_Refid 383
#define T_Requestkey 384
#define T_Restrict 385
#define T_Revoke 386
#define T_Saveconfigdir 387
#define T_Server 388
#define T_Setvar 389
#define T_Sign 390
#define T_Source 391
#define T_Statistics 392
#define T_Stats 393
#define T_Statsdir 394
#define T_Step 395
#define T_Stepout 396
#define T_Stratum 397
#define T_String 398
#define T_Sysstats 399
#define T_Tick 400
#define T_Time1 401
#define T_Time2 402
#define T_Timingstats 403
#define T_Tinker 404
#define T_Tos 405
#define T_Trap 406
#define T_True 407
#define T_Trustedkey 408
#define T_Ttl 409
#define T_Type 410
#define T_Unconfig 411
#define T_Unpeer 412
#define T_Version 413
#define T_WanderThreshold 414
#define T_Week 415
#define T_Wildcard 416
#define T_Xleave 417
#define T_Year 418
#define T_Flag 419
#define T_Void 420
#define T_EOC 421
#define T_Simulate 422
#define T_Beep_Delay 423
#define T_Sim_Duration 424
#define T_Server_Offset 425
#define T_Duration 426
#define T_Freq_Offset 427
#define T_Wander 428
#define T_Jitter 429
#define T_Prop_Delay 430
#define T_Proc_Delay 431




#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
/* Semantic-value union: one member per value-carrying grammar symbol,
   copied verbatim from the %union in ntp_parser.y.  Pointer members
   (queue, attr_val, address_node, setvar_node, server_info, script_info)
   are project types declared in the ntpd headers included above.  */
typedef union YYSTYPE
{

/* Line 214 of yacc.c  */
#line 50 "ntp_parser.y"

    char   *String;
    double  Double;
    int     Integer;
    void   *VoidPtr;
    queue  *Queue;
    struct attr_val *Attr_val;
    struct address_node *Address_node;
    struct setvar_node *Set_var;

    /* Simulation types */
    server_info *Sim_server;
    script_info *Sim_script;



/* Line 214 of yacc.c  */
#line 512 "ntp_parser.c"
} YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif


/* Copy the second part of user declarations.
*/


/* Line 264 of yacc.c  */
#line 524 "ntp_parser.c"

/* NOTE(review): everything from here to the parser tables is boilerplate
   from Bison's yacc.c skeleton — portability typedefs and stack-allocation
   machinery.  Do not hand-edit; regenerate from ntp_parser.y.  */

#ifdef short
# undef short
#endif

#ifdef YYTYPE_UINT8
typedef YYTYPE_UINT8 yytype_uint8;
#else
typedef unsigned char yytype_uint8;
#endif

#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
#elif (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
typedef signed char yytype_int8;
#else
typedef short int yytype_int8;
#endif

#ifdef YYTYPE_UINT16
typedef YYTYPE_UINT16 yytype_uint16;
#else
typedef unsigned short int yytype_uint16;
#endif

#ifdef YYTYPE_INT16
typedef YYTYPE_INT16 yytype_int16;
#else
typedef short int yytype_int16;
#endif

#ifndef YYSIZE_T
# ifdef __SIZE_TYPE__
#  define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
#  define YYSIZE_T size_t
# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
#  include <stddef.h> /* INFRINGES ON USER NAME SPACE */
#  define YYSIZE_T size_t
# else
#  define YYSIZE_T unsigned int
# endif
#endif

#define YYSIZE_MAXIMUM ((YYSIZE_T) -1)

#ifndef YY_
# if YYENABLE_NLS
#  if ENABLE_NLS
#   include <libintl.h> /* INFRINGES ON USER NAME SPACE */
#   define YY_(msgid) dgettext ("bison-runtime", msgid)
#  endif
# endif
# ifndef YY_
#  define YY_(msgid) msgid
# endif
#endif

/* Suppress unused-variable warnings by "using" E.  */
#if ! defined lint || defined __GNUC__
# define YYUSE(e) ((void) (e))
#else
# define YYUSE(e) /* empty */
#endif

/* Identity function, used to suppress warnings about constant conditions.  */
#ifndef lint
# define YYID(n) (n)
#else
#if (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
static int
YYID (int yyi)
#else
static int
YYID (yyi)
    int yyi;
#endif
{
  return yyi;
}
#endif

#if ! defined yyoverflow || YYERROR_VERBOSE

/* The parser invokes alloca or malloc; define the necessary symbols.  */

# ifdef YYSTACK_USE_ALLOCA
#  if YYSTACK_USE_ALLOCA
#   ifdef __GNUC__
#    define YYSTACK_ALLOC __builtin_alloca
#   elif defined __BUILTIN_VA_ARG_INCR
#    include <alloca.h> /* INFRINGES ON USER NAME SPACE */
#   elif defined _AIX
#    define YYSTACK_ALLOC __alloca
#   elif defined _MSC_VER
#    include <malloc.h> /* INFRINGES ON USER NAME SPACE */
#    define alloca _alloca
#   else
#    define YYSTACK_ALLOC alloca
#    if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
#     include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
#     ifndef _STDLIB_H
#      define _STDLIB_H 1
#     endif
#    endif
#   endif
#  endif
# endif

# ifdef YYSTACK_ALLOC
   /* Pacify GCC's `empty if-body' warning.  */
#  define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
#  ifndef YYSTACK_ALLOC_MAXIMUM
    /* The OS might guarantee only one guard page at the bottom of the stack,
       and a page size can be as small as 4096 bytes.  So we cannot safely
       invoke alloca (N) if N exceeds 4096.  Use a slightly smaller number
       to allow for a few compiler-allocated temporary stack slots.  */
#   define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
#  endif
# else
#  define YYSTACK_ALLOC YYMALLOC
#  define YYSTACK_FREE YYFREE
#  ifndef YYSTACK_ALLOC_MAXIMUM
#   define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
#  endif
#  if (defined __cplusplus && ! defined _STDLIB_H \
       && ! ((defined YYMALLOC || defined malloc) \
	     && (defined YYFREE || defined free)))
#   include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
#   ifndef _STDLIB_H
#    define _STDLIB_H 1
#   endif
#  endif
#  ifndef YYMALLOC
#   define YYMALLOC malloc
#   if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
#   endif
#  endif
#  ifndef YYFREE
#   define YYFREE free
#   if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
void free (void *); /* INFRINGES ON USER NAME SPACE */
#   endif
#  endif
# endif
#endif /* ! defined yyoverflow || YYERROR_VERBOSE */


#if (! defined yyoverflow \
     && (! defined __cplusplus \
	 || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))

/* A type that is properly aligned for any stack member.  */
union yyalloc
{
  yytype_int16 yyss_alloc;
  YYSTYPE yyvs_alloc;
};

/* The size of the maximum gap between one aligned stack and the next.  */
# define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1)

/* The size of an array large enough to hold all stacks, each with
   N elements.  */
# define YYSTACK_BYTES(N) \
     ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
      + YYSTACK_GAP_MAXIMUM)

/* Copy COUNT objects from FROM to TO.  The source and destination do
   not overlap.  */
# ifndef YYCOPY
#  if defined __GNUC__ && 1 < __GNUC__
#   define YYCOPY(To, From, Count) \
      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
#  else
#   define YYCOPY(To, From, Count)		\
      do					\
	{					\
	  YYSIZE_T yyi;				\
	  for (yyi = 0; yyi < (Count); yyi++)	\
	    (To)[yyi] = (From)[yyi];		\
	}					\
      while (YYID (0))
#  endif
# endif

/* Relocate STACK from its old location to the new one.  The
   local variables YYSIZE and YYSTACKSIZE give the old and new number of
   elements in the stack, and YYPTR gives the new location of the
   stack.  Advance YYPTR to a properly aligned location for the next
   stack.  */
# define YYSTACK_RELOCATE(Stack_alloc, Stack)				\
    do									\
      {									\
	YYSIZE_T yynewbytes;						\
	YYCOPY (&yyptr->Stack_alloc, Stack, yysize);			\
	Stack = &yyptr->Stack_alloc;					\
	yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
	yyptr += yynewbytes / sizeof (*yyptr);				\
      }									\
    while (YYID (0))

#endif

/* YYFINAL -- State number of the termination state.  */
#define YYFINAL  190
/* YYLAST -- Last index in YYTABLE.  */
#define YYLAST   718

/* YYNTOKENS -- Number of terminals.  */
#define YYNTOKENS  182
/* YYNNTS -- Number of nonterminals.
*/ #define YYNNTS 73 /* YYNRULES -- Number of rules. */ #define YYNRULES 263 /* YYNRULES -- Number of states. */ #define YYNSTATES 417 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ #define YYUNDEFTOK 2 #define YYMAXUTOK 431 #define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 178, 179, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 177, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 180, 2, 181, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 
174, 175, 176 }; #if YYDEBUG /* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in YYRHS. */ static const yytype_uint16 yyprhs[] = { 0, 0, 3, 5, 9, 12, 15, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 44, 47, 49, 51, 53, 55, 57, 59, 62, 65, 67, 70, 72, 74, 77, 79, 81, 84, 87, 90, 92, 94, 96, 98, 100, 103, 106, 109, 112, 114, 116, 118, 121, 124, 127, 130, 133, 136, 139, 142, 145, 148, 151, 153, 154, 157, 159, 162, 165, 168, 171, 174, 177, 180, 183, 186, 188, 191, 194, 197, 200, 203, 206, 209, 212, 215, 218, 221, 224, 227, 231, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 255, 257, 260, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 288, 291, 295, 301, 305, 310, 315, 319, 320, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 354, 356, 359, 362, 365, 368, 370, 373, 376, 379, 382, 385, 388, 391, 394, 398, 401, 403, 406, 409, 412, 415, 418, 421, 424, 427, 430, 433, 436, 438, 440, 442, 444, 446, 448, 450, 452, 455, 458, 460, 463, 466, 469, 472, 475, 478, 481, 483, 487, 489, 492, 495, 498, 501, 504, 507, 510, 513, 516, 519, 522, 525, 529, 532, 535, 537, 540, 541, 546, 550, 553, 555, 558, 561, 564, 566, 568, 572, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 597, 599, 602, 604, 606, 608, 614, 617, 619, 622, 624, 626, 628, 630, 632, 634, 640, 642, 646, 649, 653, 657, 660, 662, 668, 673, 677, 680, 682, 689, 693, 696, 700, 704, 708, 712 }; /* YYRHS -- A `-1'-separated list of the rules' RHS. 
*/ static const yytype_int16 yyrhs[] = { 183, 0, -1, 184, -1, 184, 185, 166, -1, 185, 166, -1, 1, 166, -1, -1, 186, -1, 192, -1, 194, -1, 195, -1, 202, -1, 208, -1, 199, -1, 215, -1, 218, -1, 221, -1, 224, -1, 243, -1, 187, 188, 190, -1, 187, 188, -1, 133, -1, 119, -1, 114, -1, 13, -1, 78, -1, 189, -1, 61, 143, -1, 63, 143, -1, 143, -1, 190, 191, -1, 191, -1, 7, -1, 12, 242, -1, 16, -1, 50, -1, 65, 58, -1, 91, 58, -1, 86, 58, -1, 104, -1, 121, -1, 122, -1, 152, -1, 162, -1, 154, 58, -1, 93, 58, -1, 158, 58, -1, 193, 188, -1, 156, -1, 157, -1, 14, -1, 79, 240, -1, 97, 240, -1, 8, 58, -1, 22, 58, -1, 23, 196, -1, 66, 143, -1, 67, 143, -1, 129, 58, -1, 131, 58, -1, 153, 236, -1, 110, 143, -1, 197, -1, -1, 197, 198, -1, 198, -1, 48, 143, -1, 51, 143, -1, 124, 143, -1, 126, 143, -1, 135, 143, -1, 27, 143, -1, 131, 58, -1, 150, 200, -1, 200, 201, -1, 201, -1, 19, 58, -1, 45, 58, -1, 21, 241, -1, 111, 58, -1, 112, 58, -1, 89, 242, -1, 84, 242, -1, 87, 242, -1, 82, 242, -1, 92, 58, -1, 11, 58, -1, 137, 203, -1, 139, 143, -1, 39, 204, 205, -1, 203, 204, -1, 204, -1, 20, -1, 24, -1, 76, -1, 115, -1, 127, -1, 144, -1, 148, -1, 123, -1, 205, 206, -1, 206, -1, 38, 143, -1, 155, 207, -1, 72, -1, 99, -1, 35, -1, 28, -1, 101, -1, 117, -1, 25, -1, 160, -1, 95, -1, 163, -1, 3, -1, 29, 211, -1, 96, 213, -1, 130, 188, 209, -1, 130, 189, 80, 189, 209, -1, 130, 26, 209, -1, 130, 61, 26, 209, -1, 130, 63, 26, 209, -1, 130, 136, 209, -1, -1, 209, 210, -1, 44, -1, 52, -1, 68, -1, 69, -1, 71, -1, 77, -1, 100, -1, 102, -1, 103, -1, 105, -1, 106, -1, 107, -1, 109, -1, 158, -1, 211, 212, -1, 212, -1, 9, 58, -1, 90, 58, -1, 94, 58, -1, 213, 214, -1, 214, -1, 53, 58, -1, 54, 58, -1, 55, 58, -1, 56, 58, -1, 81, 58, -1, 83, 58, -1, 85, 58, -1, 88, 58, -1, 47, 188, 216, -1, 216, 217, -1, 217, -1, 146, 242, -1, 147, 242, -1, 142, 58, -1, 128, 143, -1, 40, 241, -1, 41, 241, -1, 42, 241, -1, 43, 241, -1, 35, 219, -1, 28, 219, -1, 219, 220, -1, 220, -1, 6, -1, 10, -1, 17, -1, 64, -1, 94, -1, 108, -1, 
138, -1, 149, 222, -1, 222, 223, -1, 223, -1, 5, 242, -1, 30, 242, -1, 46, 242, -1, 49, 242, -1, 113, 242, -1, 140, 242, -1, 141, 242, -1, 231, -1, 57, 143, 185, -1, 36, -1, 15, 242, -1, 18, 58, -1, 145, 242, -1, 32, 225, -1, 70, 143, -1, 118, 143, -1, 75, 143, -1, 74, 229, -1, 116, 239, -1, 132, 143, -1, 134, 226, -1, 151, 189, -1, 151, 189, 227, -1, 154, 235, -1, 125, 143, -1, 143, -1, 143, 31, -1, -1, 143, 177, 143, 26, -1, 143, 177, 143, -1, 227, 228, -1, 228, -1, 120, 58, -1, 59, 189, -1, 229, 230, -1, 230, -1, 143, -1, 232, 234, 233, -1, 232, 234, 143, -1, 59, -1, 98, -1, 4, -1, 60, -1, 62, -1, 161, -1, 73, -1, 52, -1, 33, -1, 235, 58, -1, 58, -1, 236, 237, -1, 237, -1, 58, -1, 238, -1, 178, 58, 34, 58, 179, -1, 239, 143, -1, 143, -1, 240, 188, -1, 188, -1, 58, -1, 152, -1, 37, -1, 58, -1, 31, -1, 244, 180, 245, 247, 181, -1, 167, -1, 245, 246, 166, -1, 246, 166, -1, 168, 177, 242, -1, 169, 177, 242, -1, 247, 248, -1, 248, -1, 250, 180, 249, 251, 181, -1, 170, 177, 242, 166, -1, 133, 177, 188, -1, 251, 252, -1, 252, -1, 171, 177, 242, 180, 253, 181, -1, 253, 254, 166, -1, 254, 166, -1, 172, 177, 242, -1, 173, 177, 242, -1, 174, 177, 242, -1, 175, 177, 242, -1, 176, 177, 242, -1 }; /* YYRLINE[YYN] -- source line where rule number YYN was defined. 
*/ static const yytype_uint16 yyrline[] = { 0, 313, 313, 317, 318, 319, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 353, 359, 368, 369, 370, 371, 372, 376, 377, 378, 382, 386, 387, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 414, 422, 423, 433, 435, 437, 448, 450, 452, 457, 459, 461, 463, 465, 467, 472, 474, 478, 485, 495, 497, 499, 501, 503, 505, 507, 524, 529, 530, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 564, 566, 575, 583, 584, 588, 589, 590, 591, 592, 593, 594, 595, 599, 606, 616, 626, 635, 644, 653, 654, 658, 659, 660, 661, 662, 663, 664, 673, 677, 681, 686, 691, 696, 709, 722, 734, 735, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 757, 759, 764, 765, 766, 770, 772, 777, 778, 779, 780, 781, 782, 783, 784, 792, 797, 799, 804, 805, 806, 807, 808, 809, 810, 811, 819, 821, 826, 833, 843, 844, 845, 846, 847, 848, 849, 865, 869, 870, 874, 875, 876, 877, 878, 879, 880, 889, 890, 906, 912, 914, 916, 918, 920, 923, 925, 936, 938, 940, 950, 952, 954, 956, 958, 963, 965, 969, 973, 975, 980, 982, 986, 987, 991, 992, 996, 1011, 1016, 1024, 1025, 1029, 1030, 1031, 1032, 1036, 1037, 1038, 1048, 1049, 1053, 1055, 1060, 1062, 1066, 1071, 1072, 1076, 1077, 1081, 1090, 1091, 1095, 1096, 1105, 1120, 1124, 1125, 1129, 1130, 1134, 1135, 1139, 1144, 1148, 1152, 1153, 1157, 1162, 1163, 1167, 1169, 1171, 1173, 1175 }; #endif #if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/ static const char *const yytname[] = { "$end", "error", "$undefined", "T_Age", "T_All", "T_Allan", "T_Auth", "T_Autokey", "T_Automax", "T_Average", "T_Bclient", "T_Beacon", "T_Bias", "T_Broadcast", "T_Broadcastclient", "T_Broadcastdelay", "T_Burst", "T_Calibrate", "T_Calldelay", "T_Ceiling", "T_Clockstats", "T_Cohort", "T_ControlKey", "T_Crypto", "T_Cryptostats", "T_Day", "T_Default", "T_Digest", "T_Disable", "T_Discard", "T_Dispersion", "T_Double", "T_Driftfile", "T_Drop", "T_Ellipsis", "T_Enable", "T_End", "T_False", "T_File", "T_Filegen", "T_Flag1", "T_Flag2", "T_Flag3", "T_Flag4", "T_Flake", "T_Floor", "T_Freq", "T_Fudge", "T_Host", "T_Huffpuff", "T_Iburst", "T_Ident", "T_Ignore", "T_Incalloc", "T_Incmem", "T_Initalloc", "T_Initmem", "T_Includefile", "T_Integer", "T_Interface", "T_Ipv4", "T_Ipv4_flag", "T_Ipv6", "T_Ipv6_flag", "T_Kernel", "T_Key", "T_Keys", "T_Keysdir", "T_Kod", "T_Mssntp", "T_Leapfile", "T_Limited", "T_Link", "T_Listen", "T_Logconfig", "T_Logfile", "T_Loopstats", "T_Lowpriotrap", "T_Manycastclient", "T_Manycastserver", "T_Mask", "T_Maxage", "T_Maxclock", "T_Maxdepth", "T_Maxdist", "T_Maxmem", "T_Maxpoll", "T_Minclock", "T_Mindepth", "T_Mindist", "T_Minimum", "T_Minpoll", "T_Minsane", "T_Mode", "T_Monitor", "T_Month", "T_Mru", "T_Multicastclient", "T_Nic", "T_Nolink", "T_Nomodify", "T_None", "T_Nopeer", "T_Noquery", "T_Noselect", "T_Noserve", "T_Notrap", "T_Notrust", "T_Ntp", "T_Ntpport", "T_NtpSignDsocket", "T_Orphan", "T_Orphanwait", "T_Panic", "T_Peer", "T_Peerstats", "T_Phone", "T_Pid", "T_Pidfile", "T_Pool", "T_Port", "T_Preempt", "T_Prefer", "T_Protostats", "T_Pw", "T_Qos", "T_Randfile", "T_Rawstats", "T_Refid", "T_Requestkey", "T_Restrict", "T_Revoke", "T_Saveconfigdir", "T_Server", "T_Setvar", "T_Sign", "T_Source", "T_Statistics", "T_Stats", "T_Statsdir", "T_Step", "T_Stepout", "T_Stratum", "T_String", "T_Sysstats", "T_Tick", "T_Time1", "T_Time2", "T_Timingstats", "T_Tinker", "T_Tos", "T_Trap", "T_True", "T_Trustedkey", "T_Ttl", 
"T_Type", "T_Unconfig", "T_Unpeer", "T_Version", "T_WanderThreshold", "T_Week", "T_Wildcard", "T_Xleave", "T_Year", "T_Flag", "T_Void", "T_EOC", "T_Simulate", "T_Beep_Delay", "T_Sim_Duration", "T_Server_Offset", "T_Duration", "T_Freq_Offset", "T_Wander", "T_Jitter", "T_Prop_Delay", "T_Proc_Delay", "'='", "'('", "')'", "'{'", "'}'", "$accept", "configuration", "command_list", "command", "server_command", "client_type", "address", "ip_address", "option_list", "option", "unpeer_command", "unpeer_keyword", "other_mode_command", "authentication_command", "crypto_command_line", "crypto_command_list", "crypto_command", "orphan_mode_command", "tos_option_list", "tos_option", "monitoring_command", "stats_list", "stat", "filegen_option_list", "filegen_option", "filegen_type", "access_control_command", "ac_flag_list", "access_control_flag", "discard_option_list", "discard_option", "mru_option_list", "mru_option", "fudge_command", "fudge_factor_list", "fudge_factor", "system_option_command", "system_option_list", "system_option", "tinker_command", "tinker_option_list", "tinker_option", "miscellaneous_command", "drift_parm", "variable_assign", "trap_option_list", "trap_option", "log_config_list", "log_config_command", "interface_command", "interface_nic", "nic_rule_class", "nic_rule_action", "integer_list", "integer_list_range", "integer_list_range_elt", "integer_range", "string_list", "address_list", "boolean", "number", "simulate_command", "sim_conf_start", "sim_init_statement_list", "sim_init_statement", "sim_server_list", "sim_server", "sim_server_offset", "sim_server_name", "sim_act_list", "sim_act", "sim_act_stmt_list", "sim_act_stmt", 0 }; #endif # ifdef YYPRINT /* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to token YYLEX-NUM. 
*/ static const yytype_uint16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 61, 40, 41, 123, 125 }; # endif /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. 
*/ static const yytype_uint8 yyr1[] = { 0, 182, 183, 184, 184, 184, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 186, 186, 187, 187, 187, 187, 187, 188, 188, 188, 189, 190, 190, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 192, 193, 193, 194, 194, 194, 195, 195, 195, 195, 195, 195, 195, 195, 195, 196, 196, 197, 197, 198, 198, 198, 198, 198, 198, 198, 199, 200, 200, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, 202, 202, 202, 203, 203, 204, 204, 204, 204, 204, 204, 204, 204, 205, 205, 206, 206, 206, 206, 206, 206, 207, 207, 207, 207, 207, 207, 207, 208, 208, 208, 208, 208, 208, 208, 208, 209, 209, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 211, 211, 212, 212, 212, 213, 213, 214, 214, 214, 214, 214, 214, 214, 214, 215, 216, 216, 217, 217, 217, 217, 217, 217, 217, 217, 218, 218, 219, 219, 220, 220, 220, 220, 220, 220, 220, 221, 222, 222, 223, 223, 223, 223, 223, 223, 223, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 225, 225, 225, 226, 226, 227, 227, 228, 228, 229, 229, 230, 231, 231, 232, 232, 233, 233, 233, 233, 234, 234, 234, 235, 235, 236, 236, 237, 237, 238, 239, 239, 240, 240, 241, 241, 241, 242, 242, 243, 244, 245, 245, 246, 246, 247, 247, 248, 249, 250, 251, 251, 252, 253, 253, 254, 254, 254, 254, 254 }; /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. 
*/ static const yytype_uint8 yyr2[] = { 0, 2, 1, 3, 2, 2, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1, 1, 2, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 0, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 5, 3, 4, 4, 3, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 1, 3, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 2, 1, 2, 0, 4, 3, 2, 1, 2, 2, 2, 1, 1, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 5, 2, 1, 2, 1, 1, 1, 1, 1, 1, 5, 1, 3, 2, 3, 3, 2, 1, 5, 4, 3, 2, 1, 6, 3, 2, 3, 3, 3, 3, 3 }; /* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state STATE-NUM when YYTABLE doesn't specify something else to do. Zero means the default is an error. */ static const yytype_uint16 yydefact[] = { 0, 0, 0, 24, 50, 0, 0, 0, 63, 0, 0, 206, 0, 188, 0, 0, 0, 218, 0, 0, 0, 0, 0, 25, 0, 0, 0, 219, 0, 23, 0, 0, 22, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 49, 244, 0, 2, 0, 7, 0, 8, 0, 9, 10, 13, 11, 12, 14, 15, 16, 17, 186, 0, 18, 0, 5, 53, 242, 241, 189, 190, 54, 0, 0, 0, 0, 0, 0, 0, 55, 62, 65, 169, 170, 171, 172, 173, 174, 175, 166, 168, 0, 0, 0, 115, 140, 204, 192, 165, 92, 93, 94, 95, 99, 96, 97, 98, 0, 0, 0, 29, 0, 26, 6, 56, 57, 193, 215, 196, 214, 195, 237, 51, 0, 0, 0, 0, 0, 0, 0, 0, 116, 145, 52, 61, 235, 197, 194, 203, 58, 123, 0, 0, 123, 123, 26, 59, 198, 0, 199, 87, 91, 88, 191, 0, 0, 0, 0, 0, 0, 0, 176, 178, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 73, 75, 200, 231, 0, 60, 230, 232, 228, 202, 1, 0, 4, 20, 47, 226, 225, 224, 0, 0, 71, 66, 67, 68, 69, 72, 70, 64, 167, 141, 142, 143, 139, 205, 107, 106, 0, 104, 105, 0, 89, 101, 27, 28, 0, 0, 0, 0, 0, 0, 0, 0, 154, 156, 187, 213, 236, 146, 147, 148, 149, 150, 151, 152, 153, 144, 234, 
119, 123, 123, 122, 117, 0, 0, 90, 179, 180, 181, 182, 183, 184, 185, 177, 86, 76, 240, 238, 239, 78, 77, 84, 82, 83, 81, 85, 79, 80, 74, 0, 0, 201, 210, 0, 229, 227, 3, 32, 0, 34, 35, 0, 0, 0, 0, 39, 40, 41, 42, 0, 0, 43, 19, 31, 220, 221, 222, 217, 223, 216, 0, 0, 0, 0, 102, 114, 110, 112, 108, 109, 111, 113, 103, 100, 161, 162, 163, 164, 160, 159, 157, 158, 155, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 124, 120, 121, 123, 208, 212, 211, 209, 0, 33, 36, 38, 37, 45, 44, 46, 30, 0, 0, 0, 0, 0, 250, 0, 246, 118, 207, 0, 247, 248, 0, 245, 243, 249, 0, 233, 253, 0, 0, 0, 0, 0, 255, 0, 0, 251, 254, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 256, 0, 258, 259, 260, 261, 262, 263, 257 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int16 yydefgoto[] = { -1, 51, 52, 53, 54, 55, 127, 118, 301, 302, 56, 57, 58, 59, 85, 86, 87, 60, 180, 181, 61, 156, 113, 220, 221, 321, 62, 247, 346, 100, 101, 137, 138, 63, 232, 233, 64, 95, 96, 65, 167, 168, 66, 103, 155, 280, 281, 124, 125, 67, 68, 308, 198, 189, 185, 186, 187, 142, 128, 268, 75, 69, 70, 311, 312, 367, 368, 384, 369, 387, 388, 401, 402 }; /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. 
*/ #define YYPACT_NINF -148 static const yytype_int16 yypact[] = { 137, -132, -20, -148, -148, -7, -17, -16, 44, -1, 9, -99, -1, -148, 115, -46, -98, -148, -97, -95, -94, -90, -88, -148, -46, 144, -46, -148, -85, -148, -84, -78, -148, -77, -2, 13, 12, -71, -148, -70, 115, -66, -7, 1, 538, -65, -51, 32, -148, -148, -148, 91, 310, -68, -148, -46, -148, -46, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -12, -148, -80, -148, -148, -148, -148, -148, -148, -148, -47, -39, -38, -28, -27, 55, -26, -148, 44, -148, -148, -148, -148, -148, -148, -148, -148, -1, -148, 71, 72, 86, 9, -148, 117, -148, -1, -148, -148, -148, -148, -148, -148, -148, -148, -15, 10, 14, -148, 39, -148, 457, -148, -148, -148, -148, -90, -148, -148, -148, -46, 96, 104, 105, 106, 119, 120, 122, 124, 144, -148, -46, -148, -148, 40, -148, -148, -148, -148, 3, 4, -148, -148, 107, -148, -148, 15, -148, 115, -148, -148, -148, -7, -7, -7, -7, -7, -7, -7, 1, -148, 132, 135, 6, 143, -7, -7, -7, -7, 147, 148, 150, 538, -148, -37, -148, 151, -51, -148, -148, -148, 152, -148, 29, -148, 530, -148, -148, -148, -148, 0, -142, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, 70, -148, -148, 11, -15, -148, -148, -148, 6, 6, 6, 6, 74, 156, -7, -7, 39, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, 560, -148, -148, 560, 560, -65, 81, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -65, 168, -37, -148, 194, -148, -148, -148, -148, -7, -148, -148, 173, 178, 179, 181, -148, -148, -148, -148, 182, 183, -148, 530, -148, -148, -148, -148, -148, -148, -148, 66, 69, -100, 82, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, -148, 560, 560, -148, 223, -148, -148, -148, 192, -148, -148, -148, 
-148, -148, -148, -148, -148, -7, -7, 75, 88, -114, -148, 77, -148, 560, -148, 79, -148, -148, -46, -148, -148, -148, 90, -148, -148, 84, 93, -7, 95, -146, -148, 99, -7, -148, -148, -148, 97, 47, 98, 101, 102, 103, 108, -87, 118, -7, -7, -7, -7, -7, -148, 123, -148, -148, -148, -148, -148, -148, -148 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int16 yypgoto[] = { -148, -148, -148, -44, -148, -148, -3, -34, -148, -18, -148, -148, -148, -148, -148, -148, 187, -148, -148, 112, -148, -148, -30, -148, 61, -148, -148, -147, -148, -148, 195, -148, 159, -148, -148, 65, -148, 286, -67, -148, -148, 133, -148, -148, -148, -148, 19, -148, 177, -148, -148, -148, -148, -148, -148, 121, -148, -148, 276, -116, -42, -148, -148, -148, -6, -148, -60, -148, -148, -148, -79, -148, -92 }; /* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule which number is the opposite. If zero, do what YYDEFACT says. If YYTABLE_NINF, syntax error. */ #define YYTABLE_NINF -7 static const yytype_int16 yytable[] = { 159, 151, 250, 251, 303, 88, 160, 183, 191, 89, 157, 182, 117, 214, 314, 114, 90, 115, 97, 365, 215, 195, 278, 216, 73, 386, 309, 310, 208, 248, 249, 161, 150, 365, 71, 391, 315, 208, 72, 146, 196, 76, 77, 265, 102, 119, 120, 162, 121, 122, 163, 74, 193, 123, 194, 126, 145, 217, 140, 141, 304, 197, 305, 91, 266, 143, 144, 378, 309, 310, 152, 78, 153, 154, 147, 234, 148, 158, 116, 224, 225, 226, 227, 279, 218, 396, 397, 398, 399, 400, 188, 190, 79, 92, 408, 80, 200, 116, 192, 98, 199, 347, 348, 99, 201, 202, 316, 93, 323, 324, 325, 326, 317, 205, 164, 203, 204, 206, 255, 256, 257, 258, 259, 260, 261, 236, 254, 184, 318, 209, 210, 270, 271, 272, 273, 105, 236, 94, 1, 106, 219, 165, 166, 306, 211, 2, 222, 223, 213, 149, 3, 4, 5, 222, 237, 6, 116, 223, 267, 7, 8, 307, 238, 239, 240, 9, 10, 228, 81, 11, 82, 319, 12, 13, 320, 83, 14, 241, 242, 84, 243, 229, 244, 246, 15, 230, 231, 252, 329, 330, 263, 107, 253, 264, 16, 285, 
17, 129, 130, 131, 132, 269, 371, 18, 19, 274, 275, 20, 276, 282, 284, 21, 22, 313, 328, 23, 24, 327, 349, 396, 397, 398, 399, 400, 350, 133, 352, 134, 354, 135, 108, 356, 136, 25, 26, 27, 357, 358, 109, 359, 360, 361, 110, 363, 351, 355, 364, 28, 370, 372, 373, 29, 376, 30, 377, 31, 32, 380, 381, 111, 383, 385, 33, 112, 386, 393, 34, 35, 36, 37, 38, 39, 390, 207, 40, 403, 41, 395, 404, 405, 406, 322, 42, 362, 410, 407, 43, 44, 45, 416, 46, 47, 277, 48, 49, 212, 245, 331, 104, 353, 262, 235, 139, -6, 50, 366, 283, 379, 392, 409, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 374, 375, 3, 4, 5, 0, 0, 6, 0, 0, 0, 7, 8, 0, 0, 0, 0, 9, 10, 0, 0, 11, 389, 0, 12, 13, 0, 394, 14, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 411, 412, 413, 414, 415, 0, 16, 0, 17, 0, 0, 0, 382, 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, 21, 22, 0, 0, 23, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 26, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 0, 0, 0, 29, 0, 30, 0, 31, 32, 0, 0, 0, 0, 0, 33, 0, 0, 0, 34, 35, 36, 37, 38, 39, 0, 0, 40, 0, 41, 0, 0, 0, 0, 0, 42, 0, 0, 0, 43, 44, 45, 0, 46, 47, 2, 48, 49, 0, 0, 3, 4, 5, 0, 0, 6, -6, 50, 0, 7, 8, 0, 0, 0, 0, 9, 10, 0, 0, 11, 0, 0, 12, 13, 0, 0, 14, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 17, 0, 0, 0, 0, 0, 0, 18, 19, 0, 0, 20, 0, 0, 0, 21, 22, 0, 0, 23, 24, 286, 0, 0, 0, 0, 287, 0, 0, 0, 288, 0, 0, 169, 0, 0, 0, 25, 26, 27, 0, 170, 0, 171, 0, 0, 0, 0, 0, 0, 0, 28, 0, 0, 0, 29, 0, 30, 0, 31, 32, 0, 0, 0, 289, 0, 33, 172, 0, 0, 34, 35, 36, 37, 38, 39, 0, 0, 40, 290, 41, 0, 0, 0, 0, 0, 42, 0, 332, 0, 43, 44, 45, 0, 46, 47, 333, 48, 49, 0, 291, 0, 0, 0, 173, 292, 174, 293, 50, 175, 0, 176, 334, 335, 177, 336, 0, 0, 294, 0, 0, 337, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 178, 179, 295, 296, 0, 0, 0, 0, 0, 0, 0, 338, 0, 339, 340, 0, 341, 342, 343, 0, 344, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 297, 0, 298, 0, 0, 0, 299, 0, 0, 0, 300, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 345 }; static const yytype_int16 yycheck[] = { 42, 35, 149, 
150, 4, 6, 5, 58, 52, 10, 40, 45, 15, 28, 3, 61, 17, 63, 9, 133, 35, 33, 59, 38, 31, 171, 168, 169, 95, 26, 26, 30, 35, 133, 166, 181, 25, 104, 58, 26, 52, 58, 58, 37, 143, 143, 143, 46, 143, 143, 49, 58, 55, 143, 57, 143, 58, 72, 143, 143, 60, 73, 62, 64, 58, 143, 143, 181, 168, 169, 58, 27, 143, 143, 61, 119, 63, 143, 143, 40, 41, 42, 43, 120, 99, 172, 173, 174, 175, 176, 58, 0, 48, 94, 181, 51, 143, 143, 166, 90, 180, 248, 249, 94, 143, 143, 95, 108, 224, 225, 226, 227, 101, 58, 113, 143, 143, 143, 160, 161, 162, 163, 164, 165, 166, 128, 156, 178, 117, 58, 58, 173, 174, 175, 176, 20, 139, 138, 1, 24, 155, 140, 141, 143, 58, 8, 143, 143, 31, 136, 13, 14, 15, 143, 58, 18, 143, 143, 152, 22, 23, 161, 58, 58, 58, 28, 29, 128, 124, 32, 126, 160, 35, 36, 163, 131, 39, 58, 58, 135, 58, 142, 58, 143, 47, 146, 147, 80, 230, 231, 58, 76, 177, 58, 57, 166, 59, 53, 54, 55, 56, 58, 349, 66, 67, 58, 58, 70, 58, 58, 58, 74, 75, 143, 58, 78, 79, 143, 252, 172, 173, 174, 175, 176, 143, 81, 58, 83, 34, 85, 115, 58, 88, 96, 97, 98, 58, 58, 123, 58, 58, 58, 127, 177, 278, 287, 177, 110, 166, 26, 58, 114, 177, 116, 166, 118, 119, 180, 179, 144, 170, 177, 125, 148, 171, 166, 129, 130, 131, 132, 133, 134, 177, 86, 137, 177, 139, 180, 177, 177, 177, 220, 145, 301, 166, 177, 149, 150, 151, 166, 153, 154, 180, 156, 157, 100, 137, 232, 12, 280, 167, 124, 26, 166, 167, 311, 185, 367, 387, 401, -1, -1, -1, -1, -1, -1, -1, -1, 8, -1, -1, 363, 364, 13, 14, 15, -1, -1, 18, -1, -1, -1, 22, 23, -1, -1, -1, -1, 28, 29, -1, -1, 32, 385, -1, 35, 36, -1, 390, 39, -1, -1, -1, -1, -1, -1, -1, 47, -1, -1, -1, 403, 404, 405, 406, 407, -1, 57, -1, 59, -1, -1, -1, 376, -1, -1, 66, 67, -1, -1, 70, -1, -1, -1, 74, 75, -1, -1, 78, 79, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 96, 97, 98, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 110, -1, -1, -1, 114, -1, 116, -1, 118, 119, -1, -1, -1, -1, -1, 125, -1, -1, -1, 129, 130, 131, 132, 133, 134, -1, -1, 137, -1, 139, -1, -1, -1, -1, -1, 145, 
-1, -1, -1, 149, 150, 151, -1, 153, 154, 8, 156, 157, -1, -1, 13, 14, 15, -1, -1, 18, 166, 167, -1, 22, 23, -1, -1, -1, -1, 28, 29, -1, -1, 32, -1, -1, 35, 36, -1, -1, 39, -1, -1, -1, -1, -1, -1, -1, 47, -1, -1, -1, -1, -1, -1, -1, -1, -1, 57, -1, 59, -1, -1, -1, -1, -1, -1, 66, 67, -1, -1, 70, -1, -1, -1, 74, 75, -1, -1, 78, 79, 7, -1, -1, -1, -1, 12, -1, -1, -1, 16, -1, -1, 11, -1, -1, -1, 96, 97, 98, -1, 19, -1, 21, -1, -1, -1, -1, -1, -1, -1, 110, -1, -1, -1, 114, -1, 116, -1, 118, 119, -1, -1, -1, 50, -1, 125, 45, -1, -1, 129, 130, 131, 132, 133, 134, -1, -1, 137, 65, 139, -1, -1, -1, -1, -1, 145, -1, 44, -1, 149, 150, 151, -1, 153, 154, 52, 156, 157, -1, 86, -1, -1, -1, 82, 91, 84, 93, 167, 87, -1, 89, 68, 69, 92, 71, -1, -1, 104, -1, -1, 77, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 111, 112, 121, 122, -1, -1, -1, -1, -1, -1, -1, 100, -1, 102, 103, -1, 105, 106, 107, -1, 109, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 152, -1, 154, -1, -1, -1, 158, -1, -1, -1, 162, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 158 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. 
*/ static const yytype_uint8 yystos[] = { 0, 1, 8, 13, 14, 15, 18, 22, 23, 28, 29, 32, 35, 36, 39, 47, 57, 59, 66, 67, 70, 74, 75, 78, 79, 96, 97, 98, 110, 114, 116, 118, 119, 125, 129, 130, 131, 132, 133, 134, 137, 139, 145, 149, 150, 151, 153, 154, 156, 157, 167, 183, 184, 185, 186, 187, 192, 193, 194, 195, 199, 202, 208, 215, 218, 221, 224, 231, 232, 243, 244, 166, 58, 31, 58, 242, 58, 58, 27, 48, 51, 124, 126, 131, 135, 196, 197, 198, 6, 10, 17, 64, 94, 108, 138, 219, 220, 9, 90, 94, 211, 212, 143, 225, 219, 20, 24, 76, 115, 123, 127, 144, 148, 204, 61, 63, 143, 188, 189, 143, 143, 143, 143, 143, 229, 230, 143, 188, 240, 53, 54, 55, 56, 81, 83, 85, 88, 213, 214, 240, 143, 143, 239, 143, 143, 58, 26, 61, 63, 136, 188, 189, 58, 143, 143, 226, 203, 204, 143, 242, 5, 30, 46, 49, 113, 140, 141, 222, 223, 11, 19, 21, 45, 82, 84, 87, 89, 92, 111, 112, 200, 201, 189, 58, 178, 236, 237, 238, 58, 235, 0, 185, 166, 188, 188, 33, 52, 73, 234, 180, 143, 143, 143, 143, 143, 58, 143, 198, 220, 58, 58, 58, 212, 31, 28, 35, 38, 72, 99, 155, 205, 206, 143, 143, 40, 41, 42, 43, 128, 142, 146, 147, 216, 217, 185, 230, 188, 58, 58, 58, 58, 58, 58, 58, 58, 214, 143, 209, 26, 26, 209, 209, 80, 177, 204, 242, 242, 242, 242, 242, 242, 242, 223, 58, 58, 37, 58, 152, 241, 58, 242, 242, 242, 242, 58, 58, 58, 201, 59, 120, 227, 228, 58, 237, 58, 166, 7, 12, 16, 50, 65, 86, 91, 93, 104, 121, 122, 152, 154, 158, 162, 190, 191, 4, 60, 62, 143, 161, 233, 168, 169, 245, 246, 143, 3, 25, 95, 101, 117, 160, 163, 207, 206, 241, 241, 241, 241, 143, 58, 242, 242, 217, 44, 52, 68, 69, 71, 77, 100, 102, 103, 105, 106, 107, 109, 158, 210, 209, 209, 189, 143, 189, 58, 228, 34, 242, 58, 58, 58, 58, 58, 58, 191, 177, 177, 133, 246, 247, 248, 250, 166, 209, 26, 58, 242, 242, 177, 166, 181, 248, 180, 179, 188, 170, 249, 177, 171, 251, 252, 242, 177, 181, 252, 166, 242, 180, 172, 173, 174, 175, 176, 253, 254, 177, 177, 177, 177, 177, 181, 254, 166, 242, 242, 242, 242, 242, 166 }; #define yyerrok (yyerrstatus 
= 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY (-2) #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab /* Like YYERROR except do call yyerror. This remains here temporarily to ease the transition to the new meaning of YYERROR, for GCC. Once GCC version 2 has supplanted version 1, this can go. */ #define YYFAIL goto yyerrlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY && yylen == 1) \ { \ yychar = (Token); \ yylval = (Value); \ yytoken = YYTRANSLATE (yychar); \ YYPOPSTACK (1); \ goto yybackup; \ } \ else \ { \ yyerror (YY_("syntax error: cannot back up")); \ YYERROR; \ } \ while (YYID (0)) #define YYTERROR 1 #define YYERRCODE 256 /* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. If N is 0, then set CURRENT to the empty location which ends the previous symbol: RHS[0] (always defined). */ #define YYRHSLOC(Rhs, K) ((Rhs)[K]) #ifndef YYLLOC_DEFAULT # define YYLLOC_DEFAULT(Current, Rhs, N) \ do \ if (YYID (N)) \ { \ (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \ (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \ (Current).last_line = YYRHSLOC (Rhs, N).last_line; \ (Current).last_column = YYRHSLOC (Rhs, N).last_column; \ } \ else \ { \ (Current).first_line = (Current).last_line = \ YYRHSLOC (Rhs, 0).last_line; \ (Current).first_column = (Current).last_column = \ YYRHSLOC (Rhs, 0).last_column; \ } \ while (YYID (0)) #endif /* YY_LOCATION_PRINT -- Print the location on the stream. This macro was not mandated originally: define only if we know we won't break user code: when these are the locations we know. 
*/

/* YY_LOCATION_PRINT -- dump a source location to a stream (debug aid).
   Only meaningful when the trivial YYLTYPE layout is in use; otherwise
   it expands to a no-op. */
#ifndef YY_LOCATION_PRINT
# if YYLTYPE_IS_TRIVIAL
#  define YY_LOCATION_PRINT(File, Loc)			\
     fprintf (File, "%d.%d-%d.%d",			\
	      (Loc).first_line, (Loc).first_column,	\
	      (Loc).last_line, (Loc).last_column)
# else
#  define YY_LOCATION_PRINT(File, Loc) ((void) 0)
# endif
#endif


/* YYLEX -- calling `yylex' with the right arguments.  */
#ifdef YYLEX_PARAM
# define YYLEX yylex (YYLEX_PARAM)
#else
# define YYLEX yylex ()
#endif

/* Enable debugging if requested.  */
#if YYDEBUG

# ifndef YYFPRINTF
#  include <stdio.h> /* INFRINGES ON USER NAME SPACE */
#  define YYFPRINTF fprintf
# endif

/* Emit a trace message to stderr, but only when yydebug is set. */
# define YYDPRINTF(Args)			\
do {						\
  if (yydebug)					\
    YYFPRINTF Args;				\
} while (YYID (0))

/* Trace one grammar symbol (title, then "name (value)") when yydebug
   is set.  The Location argument is ignored by this expansion. */
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)	\
do {							\
  if (yydebug)						\
    {							\
      YYFPRINTF (stderr, "%s ", Title);			\
      yy_symbol_print (stderr,				\
		       Type, Value);			\
      YYFPRINTF (stderr, "\n");				\
    }							\
} while (YYID (0))


/*--------------------------------.
| Print this symbol on YYOUTPUT.  |
`--------------------------------*/

/* Write the semantic value of symbol YYTYPE to YYOUTPUT (debug aid).
   With YYPRINT defined, token values go through YYPRINT; otherwise
   nothing per-type is emitted for this grammar (the switch below has
   no specific cases). */
/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_value_print (yyoutput, yytype, yyvaluep)
    FILE *yyoutput;
    int yytype;
    YYSTYPE const * const yyvaluep;
#endif
{
  /* A symbol without a semantic value prints nothing. */
  if (!yyvaluep)
    return;
# ifdef YYPRINT
  if (yytype < YYNTOKENS)
    YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
# else
  YYUSE (yyoutput);
# endif
  switch (yytype)
    {
      default:
	break;
    }
}


/*--------------------------------.
| Print this symbol on YYOUTPUT.
| `--------------------------------*/

/* Print a symbol as "token NAME (value)" or "nterm NAME (value)".
   Token numbers are below YYNTOKENS; nonterminals are at or above it. */
#if (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
#else
static void
yy_symbol_print (yyoutput, yytype, yyvaluep)
    FILE *yyoutput;
    int yytype;
    YYSTYPE const * const yyvaluep;
#endif
{
  if (yytype < YYNTOKENS)
    YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
  else
    YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);

  yy_symbol_value_print (yyoutput, yytype, yyvaluep);
  YYFPRINTF (yyoutput, ")");
}

/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included).                                                   |
`------------------------------------------------------------------*/

#if (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
#else
static void
yy_stack_print (yybottom, yytop)
    yytype_int16 *yybottom;
    yytype_int16 *yytop;
#endif
{
  YYFPRINTF (stderr, "Stack now");
  /* Dump every state number currently on the stack, bottom first. */
  for (; yybottom <= yytop; yybottom++)
    {
      int yybot = *yybottom;
      YYFPRINTF (stderr, " %d", yybot);
    }
  YYFPRINTF (stderr, "\n");
}

# define YY_STACK_PRINT(Bottom, Top)				\
do {								\
  if (yydebug)							\
    yy_stack_print ((Bottom), (Top));				\
} while (YYID (0))


/*------------------------------------------------.
| Report that the YYRULE is going to be reduced.  |
`------------------------------------------------*/

#if (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
static void
yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
#else
static void
yy_reduce_print (yyvsp, yyrule)
    YYSTYPE *yyvsp;
    int yyrule;
#endif
{
  /* yyr2[rule] = length of the rule's right-hand side; yyrline[rule]
     = source line of the rule in ntp_parser.y. */
  int yynrhs = yyr2[yyrule];
  int yyi;
  unsigned long int yylno = yyrline[yyrule];
  YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
	     yyrule - 1, yylno);
  /* The symbols being reduced.  */
  for (yyi = 0; yyi < yynrhs; yyi++)
    {
      YYFPRINTF (stderr, " $%d = ", yyi + 1);
      yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
		       &(yyvsp[(yyi + 1) - (yynrhs)])
		       );
      YYFPRINTF (stderr, "\n");
    }
}

# define YY_REDUCE_PRINT(Rule)		\
do {					\
  if (yydebug)				\
    yy_reduce_print (yyvsp, Rule);	\
} while (YYID (0))

/* Nonzero means print parse trace. It is left uninitialized so that
   multiple parsers can coexist.  */
int yydebug;
#else /* !YYDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */


/* YYINITDEPTH -- initial size of the parser's stacks.  */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif

/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
   if the built-in stack extension method is used).

   Do not make this value too large; the results are undefined if
   YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
   evaluated with infinite-precision integer arithmetic.  */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif



#if YYERROR_VERBOSE

# ifndef yystrlen
#  if defined __GLIBC__ && defined _STRING_H
#   define yystrlen strlen
#  else
/* Return the length of YYSTR.  */
#if (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
static YYSIZE_T
yystrlen (const char *yystr)
#else
static YYSIZE_T
yystrlen (yystr)
    const char *yystr;
#endif
{
  YYSIZE_T yylen;
  for (yylen = 0; yystr[yylen]; yylen++)
    continue;
  return yylen;
}
#  endif
# endif

# ifndef yystpcpy
#  if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
#   define yystpcpy stpcpy
#  else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
   YYDEST.
*/
#if (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
static char *
yystpcpy (char *yydest, const char *yysrc)
#else
static char *
yystpcpy (yydest, yysrc)
    char *yydest;
    const char *yysrc;
#endif
{
  char *yyd = yydest;
  const char *yys = yysrc;

  while ((*yyd++ = *yys++) != '\0')
    continue;

  /* Point at the copied NUL terminator, stpcpy-style. */
  return yyd - 1;
}
#  endif
# endif

# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
   quotes and backslashes, so that it's suitable for yyerror.  The
   heuristic is that double-quoting is unnecessary unless the string
   contains an apostrophe, a comma, or backslash (other than
   backslash-backslash).  YYSTR is taken from yytname.  If YYRES is
   null, do not copy; instead, return the length of what the result
   would have been.  */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
  if (*yystr == '"')
    {
      YYSIZE_T yyn = 0;
      char const *yyp = yystr;

      for (;;)
	switch (*++yyp)
	  {
	  case '\'':
	  case ',':
	    goto do_not_strip_quotes;

	  case '\\':
	    if (*++yyp != '\\')
	      goto do_not_strip_quotes;
	    /* Fall through.  */
	  default:
	    if (yyres)
	      yyres[yyn] = *yyp;
	    yyn++;
	    break;

	  case '"':
	    if (yyres)
	      yyres[yyn] = '\0';
	    return yyn;
	  }
    do_not_strip_quotes: ;
    }

  if (! yyres)
    return yystrlen (yystr);

  return yystpcpy (yyres, yystr) - yyres;
}
# endif

/* Copy into YYRESULT an error message about the unexpected token
   YYCHAR while in state YYSTATE.  Return the number of bytes copied,
   including the terminating null byte.  If YYRESULT is null, do not
   copy anything; just return the number of bytes that would be
   copied.  As a special case, return 0 if an ordinary "syntax error"
   message will do.  Return YYSIZE_MAXIMUM if overflow occurs during
   size calculation.  */
static YYSIZE_T
yysyntax_error (char *yyresult, int yystate, int yychar)
{
  int yyn = yypact[yystate];

  /* Only states with an explicit action row can name expected tokens. */
  if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
    return 0;
  else
    {
      int yytype = YYTRANSLATE (yychar);
      YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
      YYSIZE_T yysize = yysize0;
      YYSIZE_T yysize1;
      int yysize_overflow = 0;
      enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
      char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
      int yyx;

# if 0
      /* This is so xgettext sees the translatable formats that are
	 constructed on the fly.  */
      YY_("syntax error, unexpected %s");
      YY_("syntax error, unexpected %s, expecting %s");
      YY_("syntax error, unexpected %s, expecting %s or %s");
      YY_("syntax error, unexpected %s, expecting %s or %s or %s");
      YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
# endif
      char *yyfmt;
      char const *yyf;
      static char const yyunexpected[] = "syntax error, unexpected %s";
      static char const yyexpecting[] = ", expecting %s";
      static char const yyor[] = " or %s";
      char yyformat[sizeof yyunexpected
		    + sizeof yyexpecting - 1
		    + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
		       * (sizeof yyor - 1))];
      char const *yyprefix = yyexpecting;

      /* Start YYX at -YYN if negative to avoid negative indexes in
	 YYCHECK.  */
      int yyxbegin = yyn < 0 ? -yyn : 0;

      /* Stay within bounds of both yycheck and yytname.  */
      int yychecklim = YYLAST - yyn + 1;
      int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
      int yycount = 1;

      yyarg[0] = yytname[yytype];
      yyfmt = yystpcpy (yyformat, yyunexpected);

      /* Collect up to YYERROR_VERBOSE_ARGS_MAXIMUM-1 expected tokens;
	 if more exist, fall back to the bare "unexpected %s" message. */
      for (yyx = yyxbegin; yyx < yyxend; ++yyx)
	if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
	  {
	    if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
	      {
		yycount = 1;
		yysize = yysize0;
		yyformat[sizeof yyunexpected - 1] = '\0';
		break;
	      }
	    yyarg[yycount++] = yytname[yyx];
	    yysize1 = yysize + yytnamerr (0, yytname[yyx]);
	    yysize_overflow |= (yysize1 < yysize);
	    yysize = yysize1;
	    yyfmt = yystpcpy (yyfmt, yyprefix);
	    yyprefix = yyor;
	  }

      yyf = YY_(yyformat);
      yysize1 = yysize + yystrlen (yyf);
      yysize_overflow |= (yysize1 < yysize);
      yysize = yysize1;

      if (yysize_overflow)
	return YYSIZE_MAXIMUM;

      if (yyresult)
	{
	  /* Avoid sprintf, as that infringes on the user's name
	     space.  Don't have undefined behavior even if the
	     translation produced a string with the wrong number of
	     "%s"s.  */
	  char *yyp = yyresult;
	  int yyi = 0;
	  while ((*yyp = *yyf) != '\0')
	    {
	      if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
		{
		  yyp += yytnamerr (yyp, yyarg[yyi++]);
		  yyf += 2;
		}
	      else
		{
		  yyp++;
		  yyf++;
		}
	    }
	}
      return yysize;
    }
}
#endif /* YYERROR_VERBOSE */


/*-----------------------------------------------.
| Release the memory associated to this symbol.  |
`-----------------------------------------------*/

/*ARGSUSED*/
#if (defined __STDC__ || defined __C99__FUNC__ \
     || defined __cplusplus || defined _MSC_VER)
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
#else
static void
yydestruct (yymsg, yytype, yyvaluep)
    const char *yymsg;
    int yytype;
    YYSTYPE *yyvaluep;
#endif
{
  YYUSE (yyvaluep);

  if (!yymsg)
    yymsg = "Deleting";
  /* NOTE(review): yylocationp is not a parameter of this function; this
     compiles only because YY_SYMBOL_PRINT expands to nothing here when
     YYDEBUG is off.  Known Bison-skeleton artifact -- confirm before
     building with YYDEBUG enabled. */
  YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);

  switch (yytype)
    {

      default:
	break;
    }
}

/* Prevent warnings from -Wmissing-prototypes.  */
#ifdef YYPARSE_PARAM
#if defined __STDC__ || defined __cplusplus
int yyparse (void *YYPARSE_PARAM);
#else
int yyparse ();
#endif
#else /* !
YYPARSE_PARAM */ #if defined __STDC__ || defined __cplusplus int yyparse (void); #else int yyparse (); #endif #endif /* ! YYPARSE_PARAM */ /* The lookahead symbol. */ int yychar; /* The semantic value of the lookahead symbol. */ YYSTYPE yylval; /* Number of syntax errors so far. */ int yynerrs; /*-------------------------. | yyparse or yypush_parse. | `-------------------------*/ #ifdef YYPARSE_PARAM #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) int yyparse (void *YYPARSE_PARAM) #else int yyparse (YYPARSE_PARAM) void *YYPARSE_PARAM; #endif #else /* ! YYPARSE_PARAM */ #if (defined __STDC__ || defined __C99__FUNC__ \ || defined __cplusplus || defined _MSC_VER) int yyparse (void) #else int yyparse () #endif #endif { int yystate; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* The stacks and their tools: `yyss': related to states. `yyvs': related to semantic values. Refer to the stacks thru separate pointers, to allow yyoverflow to reallocate them elsewhere. */ /* The state stack. */ yytype_int16 yyssa[YYINITDEPTH]; yytype_int16 *yyss; yytype_int16 *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs; YYSTYPE *yyvsp; YYSIZE_T yystacksize; int yyn; int yyresult; /* Lookahead token as an internal (translated) token number. */ int yytoken; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. */ char yymsgbuf[128]; char *yymsg = yymsgbuf; YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. 
*/ int yylen = 0; yytoken = 0; yyss = yyssa; yyvs = yyvsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ /* Initialize stack pointers. Waste one element of value and location stack so that they stay on the same level as the state stack. The wasted elements are never initialized. */ yyssp = yyss; yyvsp = yyvs; goto yysetstate; /*------------------------------------------------------------. | yynewstate -- Push a new state, which is found in yystate. | `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. */ yyssp++; yysetstate: *yyssp = yystate; if (yyss + yystacksize - 1 <= yyssp) { /* Get the current used size of the three stacks, in elements. */ YYSIZE_T yysize = yyssp - yyss + 1; #ifdef yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ YYSTYPE *yyvs1 = yyvs; yytype_int16 *yyss1 = yyss; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow (YY_("memory exhausted"), &yyss1, yysize * sizeof (*yyssp), &yyvs1, yysize * sizeof (*yyvsp), &yystacksize); yyss = yyss1; yyvs = yyvs1; } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE goto yyexhaustedlab; # else /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { yytype_int16 *yyss1 = yyss; union yyalloc *yyptr = (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); if (! 
yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); YYSTACK_RELOCATE (yyvs_alloc, yyvs); # undef YYSTACK_RELOCATE if (yyss1 != yyssa) YYSTACK_FREE (yyss1); } # endif #endif /* no yyoverflow */ yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; YYDPRINTF ((stderr, "Stack size increased to %lu\n", (unsigned long int) yystacksize)); if (yyss + yystacksize - 1 <= yyssp) YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); if (yystate == YYFINAL) YYACCEPT; goto yybackup; /*-----------. | yybackup. | `-----------*/ yybackup: /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yyn == YYPACT_NINF) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = YYLEX; } if (yychar <= YYEOF) { yychar = yytoken = YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yytoken = YYTRANSLATE (yychar); YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); } /* If the proper action on seeing token YYTOKEN is to reduce or to detect an error, take that action. */ yyn += yytoken; if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) goto yydefault; yyn = yytable[yyn]; if (yyn <= 0) { if (yyn == 0 || yyn == YYTABLE_NINF) goto yyerrlab; yyn = -yyn; goto yyreduce; } /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); /* Discard the shifted token. */ yychar = YYEMPTY; yystate = yyn; *++yyvsp = yylval; goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. 
| `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- Do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. */ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: `$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison users should not rely upon it. Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. */ yyval = yyvsp[1-yylen]; YY_REDUCE_PRINT (yyn); switch (yyn) { case 5: /* Line 1455 of yacc.c */ #line 320 "ntp_parser.y" { /* I will need to incorporate much more fine grained * error messages. The following should suffice for * the time being. */ msyslog(LOG_ERR, "syntax error in %s line %d, column %d", ip_file->fname, ip_file->err_line_no, ip_file->err_col_no); } break; case 19: /* Line 1455 of yacc.c */ #line 354 "ntp_parser.y" { struct peer_node *my_node = create_peer_node((yyvsp[(1) - (3)].Integer), (yyvsp[(2) - (3)].Address_node), (yyvsp[(3) - (3)].Queue)); if (my_node) enqueue(cfgt.peers, my_node); } break; case 20: /* Line 1455 of yacc.c */ #line 360 "ntp_parser.y" { struct peer_node *my_node = create_peer_node((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Address_node), NULL); if (my_node) enqueue(cfgt.peers, my_node); } break; case 27: /* Line 1455 of yacc.c */ #line 377 "ntp_parser.y" { (yyval.Address_node) = create_address_node((yyvsp[(2) - (2)].String), AF_INET); } break; case 28: /* Line 1455 of yacc.c */ #line 378 "ntp_parser.y" { (yyval.Address_node) = create_address_node((yyvsp[(2) - (2)].String), AF_INET6); } break; case 29: /* Line 1455 of yacc.c */ #line 382 "ntp_parser.y" { (yyval.Address_node) = create_address_node((yyvsp[(1) - (1)].String), 0); } break; case 30: /* Line 1455 of yacc.c */ #line 
386 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); } break; case 31: /* Line 1455 of yacc.c */ #line 387 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); } break; case 32: /* Line 1455 of yacc.c */ #line 391 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 33: /* Line 1455 of yacc.c */ #line 392 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 34: /* Line 1455 of yacc.c */ #line 393 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 35: /* Line 1455 of yacc.c */ #line 394 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 36: /* Line 1455 of yacc.c */ #line 395 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 37: /* Line 1455 of yacc.c */ #line 396 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 38: /* Line 1455 of yacc.c */ #line 397 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 39: /* Line 1455 of yacc.c */ #line 398 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 40: /* Line 1455 of yacc.c */ #line 399 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 41: /* Line 1455 of yacc.c */ #line 400 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 42: /* Line 1455 of yacc.c */ #line 401 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 43: /* Line 1455 of yacc.c */ #line 402 "ntp_parser.y" { (yyval.Attr_val) = 
create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 44: /* Line 1455 of yacc.c */ #line 403 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 45: /* Line 1455 of yacc.c */ #line 404 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 46: /* Line 1455 of yacc.c */ #line 405 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 47: /* Line 1455 of yacc.c */ #line 415 "ntp_parser.y" { struct unpeer_node *my_node = create_unpeer_node((yyvsp[(2) - (2)].Address_node)); if (my_node) enqueue(cfgt.unpeers, my_node); } break; case 50: /* Line 1455 of yacc.c */ #line 434 "ntp_parser.y" { cfgt.broadcastclient = 1; } break; case 51: /* Line 1455 of yacc.c */ #line 436 "ntp_parser.y" { append_queue(cfgt.manycastserver, (yyvsp[(2) - (2)].Queue)); } break; case 52: /* Line 1455 of yacc.c */ #line 438 "ntp_parser.y" { append_queue(cfgt.multicastclient, (yyvsp[(2) - (2)].Queue)); } break; case 53: /* Line 1455 of yacc.c */ #line 449 "ntp_parser.y" { enqueue(cfgt.vars, create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer))); } break; case 54: /* Line 1455 of yacc.c */ #line 451 "ntp_parser.y" { cfgt.auth.control_key = (yyvsp[(2) - (2)].Integer); } break; case 55: /* Line 1455 of yacc.c */ #line 453 "ntp_parser.y" { cfgt.auth.cryptosw++; append_queue(cfgt.auth.crypto_cmd_list, (yyvsp[(2) - (2)].Queue)); } break; case 56: /* Line 1455 of yacc.c */ #line 458 "ntp_parser.y" { cfgt.auth.keys = (yyvsp[(2) - (2)].String); } break; case 57: /* Line 1455 of yacc.c */ #line 460 "ntp_parser.y" { cfgt.auth.keysdir = (yyvsp[(2) - (2)].String); } break; case 58: /* Line 1455 of yacc.c */ #line 462 "ntp_parser.y" { cfgt.auth.request_key = (yyvsp[(2) - (2)].Integer); } break; case 59: /* Line 1455 of yacc.c */ #line 464 "ntp_parser.y" { cfgt.auth.revoke 
= (yyvsp[(2) - (2)].Integer); } break; case 60: /* Line 1455 of yacc.c */ #line 466 "ntp_parser.y" { cfgt.auth.trusted_key_list = (yyvsp[(2) - (2)].Queue); } break; case 61: /* Line 1455 of yacc.c */ #line 468 "ntp_parser.y" { cfgt.auth.ntp_signd_socket = (yyvsp[(2) - (2)].String); } break; case 63: /* Line 1455 of yacc.c */ #line 474 "ntp_parser.y" { (yyval.Queue) = create_queue(); } break; case 64: /* Line 1455 of yacc.c */ #line 479 "ntp_parser.y" { if ((yyvsp[(2) - (2)].Attr_val) != NULL) (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); else (yyval.Queue) = (yyvsp[(1) - (2)].Queue); } break; case 65: /* Line 1455 of yacc.c */ #line 486 "ntp_parser.y" { if ((yyvsp[(1) - (1)].Attr_val) != NULL) (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); else (yyval.Queue) = create_queue(); } break; case 66: /* Line 1455 of yacc.c */ #line 496 "ntp_parser.y" { (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); } break; case 67: /* Line 1455 of yacc.c */ #line 498 "ntp_parser.y" { (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); } break; case 68: /* Line 1455 of yacc.c */ #line 500 "ntp_parser.y" { (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); } break; case 69: /* Line 1455 of yacc.c */ #line 502 "ntp_parser.y" { (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); } break; case 70: /* Line 1455 of yacc.c */ #line 504 "ntp_parser.y" { (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); } break; case 71: /* Line 1455 of yacc.c */ #line 506 "ntp_parser.y" { (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); } break; case 72: /* Line 1455 of yacc.c */ #line 508 "ntp_parser.y" { (yyval.Attr_val) = NULL; cfgt.auth.revoke = (yyvsp[(2) - (2)].Integer); msyslog(LOG_WARNING, "'crypto revoke 
%d' is deprecated, " "please use 'revoke %d' instead.", cfgt.auth.revoke, cfgt.auth.revoke); } break; case 73: /* Line 1455 of yacc.c */ #line 525 "ntp_parser.y" { append_queue(cfgt.orphan_cmds,(yyvsp[(2) - (2)].Queue)); } break; case 74: /* Line 1455 of yacc.c */ #line 529 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); } break; case 75: /* Line 1455 of yacc.c */ #line 530 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); } break; case 76: /* Line 1455 of yacc.c */ #line 535 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); } break; case 77: /* Line 1455 of yacc.c */ #line 537 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); } break; case 78: /* Line 1455 of yacc.c */ #line 539 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); } break; case 79: /* Line 1455 of yacc.c */ #line 541 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); } break; case 80: /* Line 1455 of yacc.c */ #line 543 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); } break; case 81: /* Line 1455 of yacc.c */ #line 545 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 82: /* Line 1455 of yacc.c */ #line 547 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 83: /* Line 1455 of yacc.c */ #line 549 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 84: /* Line 1455 of yacc.c */ #line 551 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), 
(yyvsp[(2) - (2)].Double)); } break; case 85: /* Line 1455 of yacc.c */ #line 553 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); } break; case 86: /* Line 1455 of yacc.c */ #line 555 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (double)(yyvsp[(2) - (2)].Integer)); } break; case 87: /* Line 1455 of yacc.c */ #line 565 "ntp_parser.y" { append_queue(cfgt.stats_list, (yyvsp[(2) - (2)].Queue)); } break; case 88: /* Line 1455 of yacc.c */ #line 567 "ntp_parser.y" { if (input_from_file) cfgt.stats_dir = (yyvsp[(2) - (2)].String); else { free((yyvsp[(2) - (2)].String)); yyerror("statsdir remote configuration ignored"); } } break; case 89: /* Line 1455 of yacc.c */ #line 576 "ntp_parser.y" { enqueue(cfgt.filegen_opts, create_filegen_node((yyvsp[(2) - (3)].Integer), (yyvsp[(3) - (3)].Queue))); } break; case 90: /* Line 1455 of yacc.c */ #line 583 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_ival((yyvsp[(2) - (2)].Integer))); } break; case 91: /* Line 1455 of yacc.c */ #line 584 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue(create_ival((yyvsp[(1) - (1)].Integer))); } break; case 100: /* Line 1455 of yacc.c */ #line 600 "ntp_parser.y" { if ((yyvsp[(2) - (2)].Attr_val) != NULL) (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); else (yyval.Queue) = (yyvsp[(1) - (2)].Queue); } break; case 101: /* Line 1455 of yacc.c */ #line 607 "ntp_parser.y" { if ((yyvsp[(1) - (1)].Attr_val) != NULL) (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); else (yyval.Queue) = create_queue(); } break; case 102: /* Line 1455 of yacc.c */ #line 617 "ntp_parser.y" { if (input_from_file) (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); else { (yyval.Attr_val) = NULL; free((yyvsp[(2) - (2)].String)); yyerror("filegen file remote configuration ignored"); } } break; case 103: /* 
Line 1455 of yacc.c */ #line 627 "ntp_parser.y" { if (input_from_file) (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); else { (yyval.Attr_val) = NULL; yyerror("filegen type remote configuration ignored"); } } break; case 104: /* Line 1455 of yacc.c */ #line 636 "ntp_parser.y" { if (input_from_file) (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); else { (yyval.Attr_val) = NULL; yyerror("filegen link remote configuration ignored"); } } break; case 105: /* Line 1455 of yacc.c */ #line 645 "ntp_parser.y" { if (input_from_file) (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); else { (yyval.Attr_val) = NULL; yyerror("filegen nolink remote configuration ignored"); } } break; case 106: /* Line 1455 of yacc.c */ #line 653 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 107: /* Line 1455 of yacc.c */ #line 654 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 115: /* Line 1455 of yacc.c */ #line 674 "ntp_parser.y" { append_queue(cfgt.discard_opts, (yyvsp[(2) - (2)].Queue)); } break; case 116: /* Line 1455 of yacc.c */ #line 678 "ntp_parser.y" { append_queue(cfgt.mru_opts, (yyvsp[(2) - (2)].Queue)); } break; case 117: /* Line 1455 of yacc.c */ #line 682 "ntp_parser.y" { enqueue(cfgt.restrict_opts, create_restrict_node((yyvsp[(2) - (3)].Address_node), NULL, (yyvsp[(3) - (3)].Queue), ip_file->line_no)); } break; case 118: /* Line 1455 of yacc.c */ #line 687 "ntp_parser.y" { enqueue(cfgt.restrict_opts, create_restrict_node((yyvsp[(2) - (5)].Address_node), (yyvsp[(4) - (5)].Address_node), (yyvsp[(5) - (5)].Queue), ip_file->line_no)); } break; case 119: /* Line 1455 of yacc.c */ #line 692 "ntp_parser.y" { enqueue(cfgt.restrict_opts, create_restrict_node(NULL, NULL, (yyvsp[(3) - (3)].Queue), ip_file->line_no)); } break; case 120: /* Line 1455 of yacc.c */ #line 697 "ntp_parser.y" { 
enqueue(cfgt.restrict_opts, create_restrict_node( create_address_node( estrdup("0.0.0.0"), AF_INET), create_address_node( estrdup("0.0.0.0"), AF_INET), (yyvsp[(4) - (4)].Queue), ip_file->line_no)); } break; case 121: /* Line 1455 of yacc.c */ #line 710 "ntp_parser.y" { enqueue(cfgt.restrict_opts, create_restrict_node( create_address_node( estrdup("::"), AF_INET6), create_address_node( estrdup("::"), AF_INET6), (yyvsp[(4) - (4)].Queue), ip_file->line_no)); } break; case 122: /* Line 1455 of yacc.c */ #line 723 "ntp_parser.y" { enqueue(cfgt.restrict_opts, create_restrict_node( NULL, NULL, enqueue((yyvsp[(3) - (3)].Queue), create_ival((yyvsp[(2) - (3)].Integer))), ip_file->line_no)); } break; case 123: /* Line 1455 of yacc.c */ #line 734 "ntp_parser.y" { (yyval.Queue) = create_queue(); } break; case 124: /* Line 1455 of yacc.c */ #line 736 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_ival((yyvsp[(2) - (2)].Integer))); } break; case 139: /* Line 1455 of yacc.c */ #line 758 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); } break; case 140: /* Line 1455 of yacc.c */ #line 760 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); } break; case 141: /* Line 1455 of yacc.c */ #line 764 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 142: /* Line 1455 of yacc.c */ #line 765 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 143: /* Line 1455 of yacc.c */ #line 766 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 144: /* Line 1455 of yacc.c */ #line 771 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); } break; case 145: /* Line 1455 of yacc.c */ #line 773 "ntp_parser.y" { (yyval.Queue) = 
enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); } break; case 146: /* Line 1455 of yacc.c */ #line 777 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 147: /* Line 1455 of yacc.c */ #line 778 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 148: /* Line 1455 of yacc.c */ #line 779 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 149: /* Line 1455 of yacc.c */ #line 780 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 150: /* Line 1455 of yacc.c */ #line 781 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 151: /* Line 1455 of yacc.c */ #line 782 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 152: /* Line 1455 of yacc.c */ #line 783 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 153: /* Line 1455 of yacc.c */ #line 784 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 154: /* Line 1455 of yacc.c */ #line 793 "ntp_parser.y" { enqueue(cfgt.fudge, create_addr_opts_node((yyvsp[(2) - (3)].Address_node), (yyvsp[(3) - (3)].Queue))); } break; case 155: /* Line 1455 of yacc.c */ #line 798 "ntp_parser.y" { enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); } break; case 156: /* Line 1455 of yacc.c */ #line 800 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); } break; case 157: /* Line 1455 of yacc.c */ #line 804 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 
158: /* Line 1455 of yacc.c */ #line 805 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 159: /* Line 1455 of yacc.c */ #line 806 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 160: /* Line 1455 of yacc.c */ #line 807 "ntp_parser.y" { (yyval.Attr_val) = create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String)); } break; case 161: /* Line 1455 of yacc.c */ #line 808 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 162: /* Line 1455 of yacc.c */ #line 809 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 163: /* Line 1455 of yacc.c */ #line 810 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 164: /* Line 1455 of yacc.c */ #line 811 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 165: /* Line 1455 of yacc.c */ #line 820 "ntp_parser.y" { append_queue(cfgt.enable_opts, (yyvsp[(2) - (2)].Queue)); } break; case 166: /* Line 1455 of yacc.c */ #line 822 "ntp_parser.y" { append_queue(cfgt.disable_opts, (yyvsp[(2) - (2)].Queue)); } break; case 167: /* Line 1455 of yacc.c */ #line 827 "ntp_parser.y" { if ((yyvsp[(2) - (2)].Attr_val) != NULL) (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); else (yyval.Queue) = (yyvsp[(1) - (2)].Queue); } break; case 168: /* Line 1455 of yacc.c */ #line 834 "ntp_parser.y" { if ((yyvsp[(1) - (1)].Attr_val) != NULL) (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); else (yyval.Queue) = create_queue(); } break; case 169: /* Line 1455 of yacc.c */ #line 843 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - 
(1)].Integer)); } break; case 170: /* Line 1455 of yacc.c */ #line 844 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 171: /* Line 1455 of yacc.c */ #line 845 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 172: /* Line 1455 of yacc.c */ #line 846 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 173: /* Line 1455 of yacc.c */ #line 847 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 174: /* Line 1455 of yacc.c */ #line 848 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); } break; case 175: /* Line 1455 of yacc.c */ #line 850 "ntp_parser.y" { if (input_from_file) (yyval.Attr_val) = create_attr_ival(T_Flag, (yyvsp[(1) - (1)].Integer)); else { (yyval.Attr_val) = NULL; yyerror("enable/disable stats remote configuration ignored"); } } break; case 176: /* Line 1455 of yacc.c */ #line 865 "ntp_parser.y" { append_queue(cfgt.tinker, (yyvsp[(2) - (2)].Queue)); } break; case 177: /* Line 1455 of yacc.c */ #line 869 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); } break; case 178: /* Line 1455 of yacc.c */ #line 870 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); } break; case 179: /* Line 1455 of yacc.c */ #line 874 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 180: /* Line 1455 of yacc.c */ #line 875 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 181: /* Line 1455 of yacc.c */ #line 876 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 182: /* Line 1455 of yacc.c */ #line 877 "ntp_parser.y" { (yyval.Attr_val) 
= create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 183: /* Line 1455 of yacc.c */ #line 878 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 184: /* Line 1455 of yacc.c */ #line 879 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 185: /* Line 1455 of yacc.c */ #line 880 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double)); } break; case 187: /* Line 1455 of yacc.c */ #line 891 "ntp_parser.y" { if (curr_include_level >= MAXINCLUDELEVEL) { fprintf(stderr, "getconfig: Maximum include file level exceeded.\n"); msyslog(LOG_ERR, "getconfig: Maximum include file level exceeded."); } else { fp[curr_include_level + 1] = F_OPEN(FindConfig((yyvsp[(2) - (3)].String)), "r"); if (fp[curr_include_level + 1] == NULL) { fprintf(stderr, "getconfig: Couldn't open <%s>\n", FindConfig((yyvsp[(2) - (3)].String))); msyslog(LOG_ERR, "getconfig: Couldn't open <%s>", FindConfig((yyvsp[(2) - (3)].String))); } else ip_file = fp[++curr_include_level]; } } break; case 188: /* Line 1455 of yacc.c */ #line 907 "ntp_parser.y" { while (curr_include_level != -1) FCLOSE(fp[curr_include_level--]); } break; case 189: /* Line 1455 of yacc.c */ #line 913 "ntp_parser.y" { enqueue(cfgt.vars, create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double))); } break; case 190: /* Line 1455 of yacc.c */ #line 915 "ntp_parser.y" { enqueue(cfgt.vars, create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer))); } break; case 191: /* Line 1455 of yacc.c */ #line 917 "ntp_parser.y" { enqueue(cfgt.vars, create_attr_dval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Double))); } break; case 192: /* Line 1455 of yacc.c */ #line 919 "ntp_parser.y" { /* Null action, possibly all null parms */ } break; case 193: /* Line 1455 of yacc.c */ #line 921 
"ntp_parser.y" { enqueue(cfgt.vars, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); } break; case 194: /* Line 1455 of yacc.c */ #line 924 "ntp_parser.y" { enqueue(cfgt.vars, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); } break; case 195: /* Line 1455 of yacc.c */ #line 926 "ntp_parser.y" { if (input_from_file) enqueue(cfgt.vars, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); else { free((yyvsp[(2) - (2)].String)); yyerror("logfile remote configuration ignored"); } } break; case 196: /* Line 1455 of yacc.c */ #line 937 "ntp_parser.y" { append_queue(cfgt.logconfig, (yyvsp[(2) - (2)].Queue)); } break; case 197: /* Line 1455 of yacc.c */ #line 939 "ntp_parser.y" { append_queue(cfgt.phone, (yyvsp[(2) - (2)].Queue)); } break; case 198: /* Line 1455 of yacc.c */ #line 941 "ntp_parser.y" { if (input_from_file) enqueue(cfgt.vars, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); else { free((yyvsp[(2) - (2)].String)); yyerror("saveconfigdir remote configuration ignored"); } } break; case 199: /* Line 1455 of yacc.c */ #line 951 "ntp_parser.y" { enqueue(cfgt.setvar, (yyvsp[(2) - (2)].Set_var)); } break; case 200: /* Line 1455 of yacc.c */ #line 953 "ntp_parser.y" { enqueue(cfgt.trap, create_addr_opts_node((yyvsp[(2) - (2)].Address_node), NULL)); } break; case 201: /* Line 1455 of yacc.c */ #line 955 "ntp_parser.y" { enqueue(cfgt.trap, create_addr_opts_node((yyvsp[(2) - (3)].Address_node), (yyvsp[(3) - (3)].Queue))); } break; case 202: /* Line 1455 of yacc.c */ #line 957 "ntp_parser.y" { append_queue(cfgt.ttl, (yyvsp[(2) - (2)].Queue)); } break; case 203: /* Line 1455 of yacc.c */ #line 959 "ntp_parser.y" { enqueue(cfgt.qos, create_attr_sval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].String))); } break; case 204: /* Line 1455 of yacc.c */ #line 964 "ntp_parser.y" { enqueue(cfgt.vars, create_attr_sval(T_Driftfile, (yyvsp[(1) - (1)].String))); } break; case 205: /* Line 
1455 of yacc.c */ #line 966 "ntp_parser.y" { enqueue(cfgt.vars, create_attr_dval(T_WanderThreshold, (yyvsp[(2) - (2)].Double))); enqueue(cfgt.vars, create_attr_sval(T_Driftfile, (yyvsp[(1) - (2)].String))); } break; case 206: /* Line 1455 of yacc.c */ #line 969 "ntp_parser.y" { enqueue(cfgt.vars, create_attr_sval(T_Driftfile, "\0")); } break; case 207: /* Line 1455 of yacc.c */ #line 974 "ntp_parser.y" { (yyval.Set_var) = create_setvar_node((yyvsp[(1) - (4)].String), (yyvsp[(3) - (4)].String), (yyvsp[(4) - (4)].Integer)); } break; case 208: /* Line 1455 of yacc.c */ #line 976 "ntp_parser.y" { (yyval.Set_var) = create_setvar_node((yyvsp[(1) - (3)].String), (yyvsp[(3) - (3)].String), 0); } break; case 209: /* Line 1455 of yacc.c */ #line 981 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); } break; case 210: /* Line 1455 of yacc.c */ #line 982 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); } break; case 211: /* Line 1455 of yacc.c */ #line 986 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Integer)); } break; case 212: /* Line 1455 of yacc.c */ #line 987 "ntp_parser.y" { (yyval.Attr_val) = create_attr_pval((yyvsp[(1) - (2)].Integer), (yyvsp[(2) - (2)].Address_node)); } break; case 213: /* Line 1455 of yacc.c */ #line 991 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); } break; case 214: /* Line 1455 of yacc.c */ #line 992 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); } break; case 215: /* Line 1455 of yacc.c */ #line 997 "ntp_parser.y" { char prefix = (yyvsp[(1) - (1)].String)[0]; char *type = (yyvsp[(1) - (1)].String) + 1; if (prefix != '+' && prefix != '-' && prefix != '=') { yyerror("Logconfig prefix is not '+', '-' or '='\n"); } else (yyval.Attr_val) = create_attr_sval(prefix, estrdup(type)); YYFREE((yyvsp[(1) - (1)].String)); } break; case 
216: /* Line 1455 of yacc.c */ #line 1012 "ntp_parser.y" { enqueue(cfgt.nic_rules, create_nic_rule_node((yyvsp[(3) - (3)].Integer), NULL, (yyvsp[(2) - (3)].Integer))); } break; case 217: /* Line 1455 of yacc.c */ #line 1017 "ntp_parser.y" { enqueue(cfgt.nic_rules, create_nic_rule_node(0, (yyvsp[(3) - (3)].String), (yyvsp[(2) - (3)].Integer))); } break; case 227: /* Line 1455 of yacc.c */ #line 1048 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_ival((yyvsp[(2) - (2)].Integer))); } break; case 228: /* Line 1455 of yacc.c */ #line 1049 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue(create_ival((yyvsp[(1) - (1)].Integer))); } break; case 229: /* Line 1455 of yacc.c */ #line 1054 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Attr_val)); } break; case 230: /* Line 1455 of yacc.c */ #line 1056 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Attr_val)); } break; case 231: /* Line 1455 of yacc.c */ #line 1061 "ntp_parser.y" { (yyval.Attr_val) = create_attr_ival('i', (yyvsp[(1) - (1)].Integer)); } break; case 233: /* Line 1455 of yacc.c */ #line 1067 "ntp_parser.y" { (yyval.Attr_val) = create_attr_shorts('-', (yyvsp[(2) - (5)].Integer), (yyvsp[(4) - (5)].Integer)); } break; case 234: /* Line 1455 of yacc.c */ #line 1071 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), create_pval((yyvsp[(2) - (2)].String))); } break; case 235: /* Line 1455 of yacc.c */ #line 1072 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue(create_pval((yyvsp[(1) - (1)].String))); } break; case 236: /* Line 1455 of yacc.c */ #line 1076 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Address_node)); } break; case 237: /* Line 1455 of yacc.c */ #line 1077 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Address_node)); } break; case 238: /* Line 1455 of yacc.c */ #line 1082 "ntp_parser.y" { if ((yyvsp[(1) - (1)].Integer) != 0 && 
(yyvsp[(1) - (1)].Integer) != 1) { yyerror("Integer value is not boolean (0 or 1). Assuming 1"); (yyval.Integer) = 1; } else (yyval.Integer) = (yyvsp[(1) - (1)].Integer); } break; case 239: /* Line 1455 of yacc.c */ #line 1090 "ntp_parser.y" { (yyval.Integer) = 1; } break; case 240: /* Line 1455 of yacc.c */ #line 1091 "ntp_parser.y" { (yyval.Integer) = 0; } break; case 241: /* Line 1455 of yacc.c */ #line 1095 "ntp_parser.y" { (yyval.Double) = (double)(yyvsp[(1) - (1)].Integer); } break; case 243: /* Line 1455 of yacc.c */ #line 1106 "ntp_parser.y" { cfgt.sim_details = create_sim_node((yyvsp[(3) - (5)].Queue), (yyvsp[(4) - (5)].Queue)); /* Reset the old_config_style variable */ old_config_style = 1; } break; case 244: /* Line 1455 of yacc.c */ #line 1120 "ntp_parser.y" { old_config_style = 0; } break; case 245: /* Line 1455 of yacc.c */ #line 1124 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (3)].Queue), (yyvsp[(2) - (3)].Attr_val)); } break; case 246: /* Line 1455 of yacc.c */ #line 1125 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (2)].Attr_val)); } break; case 247: /* Line 1455 of yacc.c */ #line 1129 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); } break; case 248: /* Line 1455 of yacc.c */ #line 1130 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); } break; case 249: /* Line 1455 of yacc.c */ #line 1134 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Sim_server)); } break; case 250: /* Line 1455 of yacc.c */ #line 1135 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Sim_server)); } break; case 251: /* Line 1455 of yacc.c */ #line 1140 "ntp_parser.y" { (yyval.Sim_server) = create_sim_server((yyvsp[(1) - (5)].Address_node), (yyvsp[(3) - (5)].Double), (yyvsp[(4) - (5)].Queue)); } break; case 252: /* Line 1455 of yacc.c */ #line 1144 "ntp_parser.y" { 
(yyval.Double) = (yyvsp[(3) - (4)].Double); } break; case 253: /* Line 1455 of yacc.c */ #line 1148 "ntp_parser.y" { (yyval.Address_node) = (yyvsp[(3) - (3)].Address_node); } break; case 254: /* Line 1455 of yacc.c */ #line 1152 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (2)].Queue), (yyvsp[(2) - (2)].Sim_script)); } break; case 255: /* Line 1455 of yacc.c */ #line 1153 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (1)].Sim_script)); } break; case 256: /* Line 1455 of yacc.c */ #line 1158 "ntp_parser.y" { (yyval.Sim_script) = create_sim_script_info((yyvsp[(3) - (6)].Double), (yyvsp[(5) - (6)].Queue)); } break; case 257: /* Line 1455 of yacc.c */ #line 1162 "ntp_parser.y" { (yyval.Queue) = enqueue((yyvsp[(1) - (3)].Queue), (yyvsp[(2) - (3)].Attr_val)); } break; case 258: /* Line 1455 of yacc.c */ #line 1163 "ntp_parser.y" { (yyval.Queue) = enqueue_in_new_queue((yyvsp[(1) - (2)].Attr_val)); } break; case 259: /* Line 1455 of yacc.c */ #line 1168 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); } break; case 260: /* Line 1455 of yacc.c */ #line 1170 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); } break; case 261: /* Line 1455 of yacc.c */ #line 1172 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); } break; case 262: /* Line 1455 of yacc.c */ #line 1174 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); } break; case 263: /* Line 1455 of yacc.c */ #line 1176 "ntp_parser.y" { (yyval.Attr_val) = create_attr_dval((yyvsp[(1) - (3)].Integer), (yyvsp[(3) - (3)].Double)); } break; /* Line 1455 of yacc.c */ #line 3826 "ntp_parser.c" default: break; } YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; /* Now `shift' the result 
of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ yyn = yyr1[yyn]; yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) yystate = yytable[yystate]; else yystate = yydefgoto[yyn - YYNTOKENS]; goto yynewstate; /*------------------------------------. | yyerrlab -- here on detecting error | `------------------------------------*/ yyerrlab: /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #if ! YYERROR_VERBOSE yyerror (YY_("syntax error")); #else { YYSIZE_T yysize = yysyntax_error (0, yystate, yychar); if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM) { YYSIZE_T yyalloc = 2 * yysize; if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM)) yyalloc = YYSTACK_ALLOC_MAXIMUM; if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); yymsg = (char *) YYSTACK_ALLOC (yyalloc); if (yymsg) yymsg_alloc = yyalloc; else { yymsg = yymsgbuf; yymsg_alloc = sizeof yymsgbuf; } } if (0 < yysize && yysize <= yymsg_alloc) { (void) yysyntax_error (yymsg, yystate, yychar); yyerror (yymsg); } else { yyerror (YY_("syntax error")); if (yysize != 0) goto yyexhaustedlab; } } #endif } if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ if (yychar <= YYEOF) { /* Return failure if at end of input. */ if (yychar == YYEOF) YYABORT; } else { yydestruct ("Error: discarding", yytoken, &yylval); yychar = YYEMPTY; } } /* Else will try to reuse lookahead token after shifting the error token. */ goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers like GCC when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. 
*/ if (/*CONSTCOND*/ 0) goto yyerrorlab; /* Do not reclaim the symbols of the rule which action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); yystate = *yyssp; goto yyerrlab1; /*-------------------------------------------------------------. | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (yyn != YYPACT_NINF) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { yyn = yytable[yyn]; if (0 < yyn) break; } } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) YYABORT; yydestruct ("Error: popping", yystos[yystate], yyvsp); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); } *++yyvsp = yylval; /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. | `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; #if !defined(yyoverflow) || YYERROR_VERBOSE /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (YY_("memory exhausted")); yyresult = 2; /* Fall through. */ #endif yyreturn: if (yychar != YYEMPTY) yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval); /* Do not reclaim the symbols of the rule which action triggered this YYABORT or YYACCEPT. 
*/ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", yystos[*yyssp], yyvsp); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif #if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif /* Make sure YYID is used. */ return YYID (yyresult); } /* Line 1675 of yacc.c */ #line 1180 "ntp_parser.y" void yyerror (char *msg) { int retval; ip_file->err_line_no = ip_file->prev_token_line_no; ip_file->err_col_no = ip_file->prev_token_col_no; msyslog(LOG_ERR, "line %d column %d %s", ip_file->err_line_no, ip_file->err_col_no, msg); if (!input_from_file) { /* Save the error message in the correct buffer */ retval = snprintf(remote_config.err_msg + remote_config.err_pos, MAXLINE - remote_config.err_pos, "column %d %s", ip_file->err_col_no, msg); /* Increment the value of err_pos */ if (retval > 0) remote_config.err_pos += retval; /* Increment the number of errors */ ++remote_config.no_errors; } } /* * token_name - convert T_ token integers to text * example: token_name(T_Server) returns "T_Server" */ const char * token_name( int token ) { return yytname[YYTRANSLATE(token)]; } /* Initial Testing function -- ignore int main(int argc, char *argv[]) { ip_file = FOPEN(argv[1], "r"); if (!ip_file) { fprintf(stderr, "ERROR!! Could not open file: %s\n", argv[1]); } key_scanner = create_keyword_scanner(keyword_list); print_keyword_scanner(key_scanner, 0); yyparse(); return 0; } */
./CrossVul/dataset_final_sorted/CWE-20/c/bad_1657_1
crossvul-cpp_data_good_5418_5
/* * Copyright (c) 2002-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. 
* * __END_OF_JASPER_LICENSE__ */ #include <jasper/jas_config.h> #include <jasper/jas_types.h> jas_uchar jas_iccprofdata_srgb[] = { 0x00, 0x00, 0x0c, 0x48, 0x4c, 0x69, 0x6e, 0x6f, 0x02, 0x10, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xce, 0x00, 0x02, 0x00, 0x09, 0x00, 0x06, 0x00, 0x31, 0x00, 0x00, 0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x48, 0x50, 0x20, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x33, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x84, 0x00, 0x00, 0x00, 0x6c, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x01, 0xf0, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x02, 0x04, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x02, 0x18, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x02, 0x2c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x02, 0x40, 0x00, 0x00, 0x00, 0x14, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x86, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xd4, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x04, 0x0c, 0x00, 0x00, 0x00, 0x24, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x0c, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x3c, 0x00, 0x00, 0x08, 
0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x3c, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x3c, 0x00, 0x00, 0x08, 0x0c, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0x28, 0x63, 0x29, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x65, 0x77, 0x6c, 0x65, 0x74, 0x74, 0x2d, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x20, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x6e, 0x79, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa2, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x99, 0x00, 0x00, 0xb7, 0x85, 0x00, 0x00, 0x18, 0xda, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0xa0, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xcf, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 
0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x56, 0x69, 0x65, 0x77, 0x69, 0x6e, 0x67, 0x20, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x20, 0x56, 0x69, 0x65, 0x77, 0x69, 0x6e, 0x67, 0x20, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0xfe, 0x00, 0x14, 0x5f, 0x2e, 0x00, 0x10, 0xcf, 0x14, 0x00, 0x03, 0xed, 0xcc, 0x00, 0x04, 0x13, 0x0b, 0x00, 0x03, 0x5c, 0x9e, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x09, 0x56, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1f, 0xe7, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 
0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 
0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 
0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 
0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 
0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 
0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; int jas_iccprofdata_srgblen = sizeof(jas_iccprofdata_srgb); jas_uchar jas_iccprofdata_sgray[] = { 0x00, 0x00, 0x01, 0x8a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x20, 0x00, 0x00, 0x73, 0x63, 0x6e, 0x72, 0x47, 0x52, 0x41, 0x59, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xd3, 0x00, 0x01, 0x00, 0x1f, 0x00, 0x0d, 0x00, 0x35, 0x00, 0x21, 0x61, 0x63, 0x73, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x4b, 0x4f, 0x44, 0x41, 0x73, 0x47, 0x72, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x4a, 0x50, 0x45, 0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, 0x86, 0x63, 0x70, 0x72, 
0x74, 0x00, 0x00, 0x01, 0x3c, 0x00, 0x00, 0x00, 0x2b, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x01, 0x68, 0x00, 0x00, 0x00, 0x14, 0x6b, 0x54, 0x52, 0x43, 0x00, 0x00, 0x01, 0x7c, 0x00, 0x00, 0x00, 0x0e, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, 0x52, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x20, 0x49, 0x43, 0x43, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x20, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x69, 0x6e, 0x67, 0x20, 0x73, 0x52, 0x47, 0x42, 0x2d, 0x67, 0x72, 0x65, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0x32, 0x30, 0x30, 0x33, 0x20, 0x73, 0x52, 0x47, 0x42, 0x2d, 0x67, 0x72, 0x65, 0x79, 0x20, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x54, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcf, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0xcd }; int jas_iccprofdata_sgraylen = sizeof(jas_iccprofdata_sgray);
./CrossVul/dataset_final_sorted/CWE-20/c/good_5418_5
crossvul-cpp_data_good_254_0
/** * @file * Usenet network mailbox type; talk to an NNTP server * * @authors * Copyright (C) 1998 Brandon Long <blong@fiction.net> * Copyright (C) 1999 Andrej Gritsenko <andrej@lucky.net> * Copyright (C) 2000-2017 Vsevolod Volkov <vvv@mutt.org.ua> * * @copyright * This program is free software: you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation, either version 2 of the License, or (at your option) any later * version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ /** * @page nntp Usenet network mailbox type; talk to an NNTP server * * Usenet network mailbox type; talk to an NNTP server */ #include "config.h" #include <ctype.h> #include <limits.h> #include <string.h> #include <strings.h> #include <unistd.h> #include "mutt/mutt.h" #include "conn/conn.h" #include "mutt.h" #include "nntp.h" #include "bcache.h" #include "body.h" #include "context.h" #include "envelope.h" #include "globals.h" #include "header.h" #include "mailbox.h" #include "mutt_account.h" #include "mutt_curses.h" #include "mutt_logging.h" #include "mutt_socket.h" #include "mx.h" #include "ncrypt/ncrypt.h" #include "options.h" #include "progress.h" #include "protos.h" #include "thread.h" #include "url.h" #ifdef USE_HCACHE #include "hcache/hcache.h" #endif #ifdef USE_SASL #include <sasl/sasl.h> #include <sasl/saslutil.h> #endif struct NntpServer *CurrentNewsSrv; /** * nntp_connect_error - Signal a failed connection * @param nserv NNTP server * @retval -1 Always */ static int nntp_connect_error(struct NntpServer *nserv) { nserv->status = NNTP_NONE; mutt_error(_("Server closed 
connection!")); return -1; } /** * nntp_capabilities - Get capabilities * @param nserv NNTP server * @retval -1 Error, connection is closed * @retval 0 Mode is reader, capabilities set up * @retval 1 Need to switch to reader mode */ static int nntp_capabilities(struct NntpServer *nserv) { struct Connection *conn = nserv->conn; bool mode_reader = false; char buf[LONG_STRING]; char authinfo[LONG_STRING] = ""; nserv->hasCAPABILITIES = false; nserv->hasSTARTTLS = false; nserv->hasDATE = false; nserv->hasLIST_NEWSGROUPS = false; nserv->hasLISTGROUP = false; nserv->hasLISTGROUPrange = false; nserv->hasOVER = false; FREE(&nserv->authenticators); if (mutt_socket_send(conn, "CAPABILITIES\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } /* no capabilities */ if (mutt_str_strncmp("101", buf, 3) != 0) return 1; nserv->hasCAPABILITIES = true; /* parse capabilities */ do { if (mutt_socket_readln(buf, sizeof(buf), conn) < 0) return nntp_connect_error(nserv); if (mutt_str_strcmp("STARTTLS", buf) == 0) nserv->hasSTARTTLS = true; else if (mutt_str_strcmp("MODE-READER", buf) == 0) mode_reader = true; else if (mutt_str_strcmp("READER", buf) == 0) { nserv->hasDATE = true; nserv->hasLISTGROUP = true; nserv->hasLISTGROUPrange = true; } else if (mutt_str_strncmp("AUTHINFO ", buf, 9) == 0) { mutt_str_strcat(buf, sizeof(buf), " "); mutt_str_strfcpy(authinfo, buf + 8, sizeof(authinfo)); } #ifdef USE_SASL else if (mutt_str_strncmp("SASL ", buf, 5) == 0) { char *p = buf + 5; while (*p == ' ') p++; nserv->authenticators = mutt_str_strdup(p); } #endif else if (mutt_str_strcmp("OVER", buf) == 0) nserv->hasOVER = true; else if (mutt_str_strncmp("LIST ", buf, 5) == 0) { char *p = strstr(buf, " NEWSGROUPS"); if (p) { p += 11; if (*p == '\0' || *p == ' ') nserv->hasLIST_NEWSGROUPS = true; } } } while (mutt_str_strcmp(".", buf) != 0); *buf = '\0'; #ifdef USE_SASL if (nserv->authenticators && strcasestr(authinfo, " SASL ")) mutt_str_strfcpy(buf, 
nserv->authenticators, sizeof(buf)); #endif if (strcasestr(authinfo, " USER ")) { if (*buf) mutt_str_strcat(buf, sizeof(buf), " "); mutt_str_strcat(buf, sizeof(buf), "USER"); } mutt_str_replace(&nserv->authenticators, buf); /* current mode is reader */ if (nserv->hasDATE) return 0; /* server is mode-switching, need to switch to reader mode */ if (mode_reader) return 1; mutt_socket_close(conn); nserv->status = NNTP_BYE; mutt_error(_("Server doesn't support reader mode.")); return -1; } char *OverviewFmt = "Subject:\0" "From:\0" "Date:\0" "Message-ID:\0" "References:\0" "Content-Length:\0" "Lines:\0" "\0"; /** * nntp_attempt_features - Detect supported commands * @param nserv NNTP server * @retval 0 Success * @retval -1 Failure */ static int nntp_attempt_features(struct NntpServer *nserv) { struct Connection *conn = nserv->conn; char buf[LONG_STRING]; /* no CAPABILITIES, trying DATE, LISTGROUP, LIST NEWSGROUPS */ if (!nserv->hasCAPABILITIES) { if (mutt_socket_send(conn, "DATE\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } if (mutt_str_strncmp("500", buf, 3) != 0) nserv->hasDATE = true; if (mutt_socket_send(conn, "LISTGROUP\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } if (mutt_str_strncmp("500", buf, 3) != 0) nserv->hasLISTGROUP = true; if (mutt_socket_send(conn, "LIST NEWSGROUPS +\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } if (mutt_str_strncmp("500", buf, 3) != 0) nserv->hasLIST_NEWSGROUPS = true; if (mutt_str_strncmp("215", buf, 3) == 0) { do { if (mutt_socket_readln(buf, sizeof(buf), conn) < 0) return nntp_connect_error(nserv); } while (mutt_str_strcmp(".", buf) != 0); } } /* no LIST NEWSGROUPS, trying XGTITLE */ if (!nserv->hasLIST_NEWSGROUPS) { if (mutt_socket_send(conn, "XGTITLE\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } if (mutt_str_strncmp("500", 
buf, 3) != 0) nserv->hasXGTITLE = true; } /* no OVER, trying XOVER */ if (!nserv->hasOVER) { if (mutt_socket_send(conn, "XOVER\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } if (mutt_str_strncmp("500", buf, 3) != 0) nserv->hasXOVER = true; } /* trying LIST OVERVIEW.FMT */ if (nserv->hasOVER || nserv->hasXOVER) { if (mutt_socket_send(conn, "LIST OVERVIEW.FMT\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } if (mutt_str_strncmp("215", buf, 3) != 0) nserv->overview_fmt = OverviewFmt; else { int cont = 0; size_t buflen = 2 * LONG_STRING, off = 0, b = 0; if (nserv->overview_fmt) FREE(&nserv->overview_fmt); nserv->overview_fmt = mutt_mem_malloc(buflen); while (true) { if (buflen - off < LONG_STRING) { buflen *= 2; mutt_mem_realloc(&nserv->overview_fmt, buflen); } const int chunk = mutt_socket_readln(nserv->overview_fmt + off, buflen - off, conn); if (chunk < 0) { FREE(&nserv->overview_fmt); return nntp_connect_error(nserv); } if (!cont && (mutt_str_strcmp(".", nserv->overview_fmt + off) == 0)) break; cont = chunk >= buflen - off ? 
1 : 0; off += strlen(nserv->overview_fmt + off); if (!cont) { char *colon = NULL; if (nserv->overview_fmt[b] == ':') { memmove(nserv->overview_fmt + b, nserv->overview_fmt + b + 1, off - b - 1); nserv->overview_fmt[off - 1] = ':'; } colon = strchr(nserv->overview_fmt + b, ':'); if (!colon) nserv->overview_fmt[off++] = ':'; else if (strcmp(colon + 1, "full") != 0) off = colon + 1 - nserv->overview_fmt; if (strcasecmp(nserv->overview_fmt + b, "Bytes:") == 0) { size_t len = strlen(nserv->overview_fmt + b); mutt_str_strfcpy(nserv->overview_fmt + b, "Content-Length:", len + 1); off = b + len; } nserv->overview_fmt[off++] = '\0'; b = off; } } nserv->overview_fmt[off++] = '\0'; mutt_mem_realloc(&nserv->overview_fmt, off); } } return 0; } /** * nntp_auth - Get login, password and authenticate * @param nserv NNTP server * @retval 0 Success * @retval -1 Failure */ static int nntp_auth(struct NntpServer *nserv) { struct Connection *conn = nserv->conn; char buf[LONG_STRING]; char authenticators[LONG_STRING] = "USER"; char *method = NULL, *a = NULL, *p = NULL; unsigned char flags = conn->account.flags; while (true) { /* get login and password */ if ((mutt_account_getuser(&conn->account) < 0) || (conn->account.user[0] == '\0') || (mutt_account_getpass(&conn->account) < 0) || (conn->account.pass[0] == '\0')) { break; } /* get list of authenticators */ if (NntpAuthenticators && *NntpAuthenticators) mutt_str_strfcpy(authenticators, NntpAuthenticators, sizeof(authenticators)); else if (nserv->hasCAPABILITIES) { mutt_str_strfcpy(authenticators, NONULL(nserv->authenticators), sizeof(authenticators)); p = authenticators; while (*p) { if (*p == ' ') *p = ':'; p++; } } p = authenticators; while (*p) { *p = toupper(*p); p++; } mutt_debug(1, "available methods: %s\n", nserv->authenticators); a = authenticators; while (true) { if (!a) { mutt_error(_("No authenticators available")); break; } method = a; a = strchr(a, ':'); if (a) *a++ = '\0'; /* check authenticator */ if 
(nserv->hasCAPABILITIES) { char *m = NULL; if (!nserv->authenticators) continue; m = strcasestr(nserv->authenticators, method); if (!m) continue; if (m > nserv->authenticators && *(m - 1) != ' ') continue; m += strlen(method); if (*m != '\0' && *m != ' ') continue; } mutt_debug(1, "trying method %s\n", method); /* AUTHINFO USER authentication */ if (strcmp(method, "USER") == 0) { mutt_message(_("Authenticating (%s)..."), method); snprintf(buf, sizeof(buf), "AUTHINFO USER %s\r\n", conn->account.user); if (mutt_socket_send(conn, buf) < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { break; } /* authenticated, password is not required */ if (mutt_str_strncmp("281", buf, 3) == 0) return 0; /* username accepted, sending password */ if (mutt_str_strncmp("381", buf, 3) == 0) { if (DebugLevel < MUTT_SOCK_LOG_FULL) mutt_debug(MUTT_SOCK_LOG_CMD, "%d> AUTHINFO PASS *\n", conn->fd); snprintf(buf, sizeof(buf), "AUTHINFO PASS %s\r\n", conn->account.pass); if (mutt_socket_send_d(conn, buf, MUTT_SOCK_LOG_FULL) < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { break; } /* authenticated */ if (mutt_str_strncmp("281", buf, 3) == 0) return 0; } /* server doesn't support AUTHINFO USER, trying next method */ if (*buf == '5') continue; } else { #ifdef USE_SASL sasl_conn_t *saslconn = NULL; sasl_interact_t *interaction = NULL; int rc; char inbuf[LONG_STRING] = ""; const char *mech = NULL; const char *client_out = NULL; unsigned int client_len, len; if (mutt_sasl_client_new(conn, &saslconn) < 0) { mutt_debug(1, "error allocating SASL connection.\n"); continue; } while (true) { rc = sasl_client_start(saslconn, method, &interaction, &client_out, &client_len, &mech); if (rc != SASL_INTERACT) break; mutt_sasl_interact(interaction); } if (rc != SASL_OK && rc != SASL_CONTINUE) { sasl_dispose(&saslconn); mutt_debug(1, "error starting SASL authentication exchange.\n"); continue; } mutt_message(_("Authenticating (%s)..."), method); snprintf(buf, sizeof(buf), "AUTHINFO SASL %s", 
method); /* looping protocol */ while (rc == SASL_CONTINUE || (rc == SASL_OK && client_len)) { /* send out client response */ if (client_len) { if (DebugLevel >= MUTT_SOCK_LOG_FULL) { char tmp[LONG_STRING]; memcpy(tmp, client_out, client_len); for (p = tmp; p < tmp + client_len; p++) { if (*p == '\0') *p = '.'; } *p = '\0'; mutt_debug(1, "SASL> %s\n", tmp); } if (*buf) mutt_str_strcat(buf, sizeof(buf), " "); len = strlen(buf); if (sasl_encode64(client_out, client_len, buf + len, sizeof(buf) - len, &len) != SASL_OK) { mutt_debug(1, "error base64-encoding client response.\n"); break; } } mutt_str_strcat(buf, sizeof(buf), "\r\n"); if (DebugLevel < MUTT_SOCK_LOG_FULL) { if (strchr(buf, ' ')) { mutt_debug(MUTT_SOCK_LOG_CMD, "%d> AUTHINFO SASL %s%s\n", conn->fd, method, client_len ? " sasl_data" : ""); } else mutt_debug(MUTT_SOCK_LOG_CMD, "%d> sasl_data\n", conn->fd); } client_len = 0; if (mutt_socket_send_d(conn, buf, MUTT_SOCK_LOG_FULL) < 0 || mutt_socket_readln_d(inbuf, sizeof(inbuf), conn, MUTT_SOCK_LOG_FULL) < 0) { break; } if ((mutt_str_strncmp(inbuf, "283 ", 4) != 0) && (mutt_str_strncmp(inbuf, "383 ", 4) != 0)) { if (DebugLevel < MUTT_SOCK_LOG_FULL) mutt_debug(MUTT_SOCK_LOG_CMD, "%d< %s\n", conn->fd, inbuf); break; } if (DebugLevel < MUTT_SOCK_LOG_FULL) { inbuf[3] = '\0'; mutt_debug(MUTT_SOCK_LOG_CMD, "%d< %s sasl_data\n", conn->fd, inbuf); } if (strcmp("=", inbuf + 4) == 0) len = 0; else if (sasl_decode64(inbuf + 4, strlen(inbuf + 4), buf, sizeof(buf) - 1, &len) != SASL_OK) { mutt_debug(1, "error base64-decoding server response.\n"); break; } else if (DebugLevel >= MUTT_SOCK_LOG_FULL) { char tmp[LONG_STRING]; memcpy(tmp, buf, len); for (p = tmp; p < tmp + len; p++) { if (*p == '\0') *p = '.'; } *p = '\0'; mutt_debug(1, "SASL< %s\n", tmp); } while (true) { rc = sasl_client_step(saslconn, buf, len, &interaction, &client_out, &client_len); if (rc != SASL_INTERACT) break; mutt_sasl_interact(interaction); } if (*inbuf != '3') break; *buf = '\0'; } /* looping protocol 
*/ if (rc == SASL_OK && client_len == 0 && *inbuf == '2') { mutt_sasl_setup_conn(conn, saslconn); return 0; } /* terminate SASL session */ sasl_dispose(&saslconn); if (conn->fd < 0) break; if (mutt_str_strncmp(inbuf, "383 ", 4) == 0) { if (mutt_socket_send(conn, "*\r\n") < 0 || mutt_socket_readln(inbuf, sizeof(inbuf), conn) < 0) { break; } } /* server doesn't support AUTHINFO SASL, trying next method */ if (*inbuf == '5') continue; #else continue; #endif /* USE_SASL */ } mutt_error(_("%s authentication failed."), method); break; } break; } /* error */ nserv->status = NNTP_BYE; conn->account.flags = flags; if (conn->fd < 0) { mutt_error(_("Server closed connection!")); } else mutt_socket_close(conn); return -1; } /** * nntp_open_connection - Connect to server, authenticate and get capabilities * @param nserv NNTP server * @retval 0 Success * @retval -1 Failure */ int nntp_open_connection(struct NntpServer *nserv) { struct Connection *conn = nserv->conn; char buf[STRING]; int cap; bool posting = false, auth = true; if (nserv->status == NNTP_OK) return 0; if (nserv->status == NNTP_BYE) return -1; nserv->status = NNTP_NONE; if (mutt_socket_open(conn) < 0) return -1; if (mutt_socket_readln(buf, sizeof(buf), conn) < 0) return nntp_connect_error(nserv); if (mutt_str_strncmp("200", buf, 3) == 0) posting = true; else if (mutt_str_strncmp("201", buf, 3) != 0) { mutt_socket_close(conn); mutt_str_remove_trailing_ws(buf); mutt_error("%s", buf); return -1; } /* get initial capabilities */ cap = nntp_capabilities(nserv); if (cap < 0) return -1; /* tell news server to switch to mode reader if it isn't so */ if (cap > 0) { if (mutt_socket_send(conn, "MODE READER\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } if (mutt_str_strncmp("200", buf, 3) == 0) posting = true; else if (mutt_str_strncmp("201", buf, 3) == 0) posting = false; /* error if has capabilities, ignore result if no capabilities */ else if (nserv->hasCAPABILITIES) { 
mutt_socket_close(conn); mutt_error(_("Could not switch to reader mode.")); return -1; } /* recheck capabilities after MODE READER */ if (nserv->hasCAPABILITIES) { cap = nntp_capabilities(nserv); if (cap < 0) return -1; } } mutt_message(_("Connected to %s. %s"), conn->account.host, posting ? _("Posting is ok.") : _("Posting is NOT ok.")); mutt_sleep(1); #ifdef USE_SSL /* Attempt STARTTLS if available and desired. */ if (nserv->use_tls != 1 && (nserv->hasSTARTTLS || SslForceTls)) { if (nserv->use_tls == 0) { nserv->use_tls = SslForceTls || query_quadoption(SslStarttls, _("Secure connection with TLS?")) == MUTT_YES ? 2 : 1; } if (nserv->use_tls == 2) { if (mutt_socket_send(conn, "STARTTLS\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } if (mutt_str_strncmp("382", buf, 3) != 0) { nserv->use_tls = 0; mutt_error("STARTTLS: %s", buf); } else if (mutt_ssl_starttls(conn)) { nserv->use_tls = 0; nserv->status = NNTP_NONE; mutt_socket_close(nserv->conn); mutt_error(_("Could not negotiate TLS connection")); return -1; } else { /* recheck capabilities after STARTTLS */ cap = nntp_capabilities(nserv); if (cap < 0) return -1; } } } #endif /* authentication required? 
*/ if (conn->account.flags & MUTT_ACCT_USER) { if (!conn->account.user[0]) auth = false; } else { if (mutt_socket_send(conn, "STAT\r\n") < 0 || mutt_socket_readln(buf, sizeof(buf), conn) < 0) { return nntp_connect_error(nserv); } if (mutt_str_strncmp("480", buf, 3) != 0) auth = false; } /* authenticate */ if (auth && nntp_auth(nserv) < 0) return -1; /* get final capabilities after authentication */ if (nserv->hasCAPABILITIES && (auth || cap > 0)) { cap = nntp_capabilities(nserv); if (cap < 0) return -1; if (cap > 0) { mutt_socket_close(conn); mutt_error(_("Could not switch to reader mode.")); return -1; } } /* attempt features */ if (nntp_attempt_features(nserv) < 0) return -1; nserv->status = NNTP_OK; return 0; } /** * nntp_query - Send data from buffer and receive answer to same buffer * @param nntp_data NNTP server data * @param line Buffer containing data * @param linelen Length of buffer * @retval 0 Success * @retval -1 Failure */ static int nntp_query(struct NntpData *nntp_data, char *line, size_t linelen) { struct NntpServer *nserv = nntp_data->nserv; char buf[LONG_STRING] = { 0 }; if (nserv->status == NNTP_BYE) return -1; while (true) { if (nserv->status == NNTP_OK) { int rc = 0; if (*line) rc = mutt_socket_send(nserv->conn, line); else if (nntp_data->group) { snprintf(buf, sizeof(buf), "GROUP %s\r\n", nntp_data->group); rc = mutt_socket_send(nserv->conn, buf); } if (rc >= 0) rc = mutt_socket_readln(buf, sizeof(buf), nserv->conn); if (rc >= 0) break; } /* reconnect */ while (true) { nserv->status = NNTP_NONE; if (nntp_open_connection(nserv) == 0) break; snprintf(buf, sizeof(buf), _("Connection to %s lost. 
Reconnect?"), nserv->conn->account.host); if (mutt_yesorno(buf, MUTT_YES) != MUTT_YES) { nserv->status = NNTP_BYE; return -1; } } /* select newsgroup after reconnection */ if (nntp_data->group) { snprintf(buf, sizeof(buf), "GROUP %s\r\n", nntp_data->group); if (mutt_socket_send(nserv->conn, buf) < 0 || mutt_socket_readln(buf, sizeof(buf), nserv->conn) < 0) { return nntp_connect_error(nserv); } } if (!*line) break; } mutt_str_strfcpy(line, buf, linelen); return 0; } /** * nntp_fetch_lines - Read lines, calling a callback function for each * @param nntp_data NNTP server data * @param query Query to match * @param qlen Length of query * @param msg Progess message (OPTIONAL) * @param funct Callback function * @param data Data for callback function * @retval 0 Success * @retval 1 Bad response (answer in query buffer) * @retval -1 Connection lost * @retval -2 Error in funct(*line, *data) * * This function calls funct(*line, *data) for each received line, * funct(NULL, *data) if rewind(*data) needs, exits when fail or done: */ static int nntp_fetch_lines(struct NntpData *nntp_data, char *query, size_t qlen, const char *msg, int (*funct)(char *, void *), void *data) { int done = false; int rc; while (!done) { char buf[LONG_STRING]; char *line = NULL; unsigned int lines = 0; size_t off = 0; struct Progress progress; if (msg) mutt_progress_init(&progress, msg, MUTT_PROGRESS_MSG, ReadInc, 0); mutt_str_strfcpy(buf, query, sizeof(buf)); if (nntp_query(nntp_data, buf, sizeof(buf)) < 0) return -1; if (buf[0] != '2') { mutt_str_strfcpy(query, buf, qlen); return 1; } line = mutt_mem_malloc(sizeof(buf)); rc = 0; while (true) { char *p = NULL; int chunk = mutt_socket_readln_d(buf, sizeof(buf), nntp_data->nserv->conn, MUTT_SOCK_LOG_HDR); if (chunk < 0) { nntp_data->nserv->status = NNTP_NONE; break; } p = buf; if (!off && buf[0] == '.') { if (buf[1] == '\0') { done = true; break; } if (buf[1] == '.') p++; } mutt_str_strfcpy(line + off, p, sizeof(buf)); if (chunk >= sizeof(buf)) off += 
strlen(p);
      else
      {
        /* full logical line assembled: report progress and hand it to the
         * callback; a callback failure (-2) is remembered but draining of the
         * multi-line response continues so the connection stays in sync */
        if (msg)
          mutt_progress_update(&progress, ++lines, -1);
        if (rc == 0 && funct(line, data) < 0)
          rc = -2;
        off = 0;
      }

      mutt_mem_realloc(&line, off + sizeof(buf));
    }
    FREE(&line);
    /* final call with NULL tells the callback to rewind/finalize its state */
    funct(NULL, data);
  }
  return rc;
}

/**
 * fetch_description - Parse newsgroup description
 * @param line String to parse ("<group> <description>"), or NULL on finalize
 * @param data NNTP Server (struct NntpServer *)
 * @retval 0 Always
 *
 * Callback for nntp_fetch_lines(): splits the line at the first whitespace
 * into group name and description, then updates the cached description for
 * that group if it changed.
 */
static int fetch_description(char *line, void *data)
{
  struct NntpServer *nserv = data;
  struct NntpData *nntp_data = NULL;
  char *desc = NULL;

  if (!line)
    return 0;

  desc = strpbrk(line, " \t");
  if (desc)
  {
    *desc++ = '\0';
    desc += strspn(desc, " \t");
  }
  else
    desc = strchr(line, '\0'); /* no description: point at empty string */

  nntp_data = mutt_hash_find(nserv->groups_hash, line);
  if (nntp_data && (mutt_str_strcmp(desc, nntp_data->desc) != 0))
  {
    mutt_str_replace(&nntp_data->desc, desc);
    mutt_debug(2, "group: %s, desc: %s\n", line, desc);
  }
  return 0;
}

/**
 * get_description - Fetch newsgroups descriptions
 * @param nntp_data NNTP data
 * @param wildmat   Group pattern to match, or NULL for this group only
 * @param msg       Progress message
 * @retval  0 Success (also when the server supports neither command)
 * @retval  1 Bad response (answer in query buffer)
 * @retval -1 Connection lost
 * @retval -2 Error
 */
static int get_description(struct NntpData *nntp_data, char *wildmat, char *msg)
{
  char buf[STRING];
  char *cmd = NULL;

  /* get newsgroup description, if possible */
  struct NntpServer *nserv = nntp_data->nserv;
  if (!wildmat)
    wildmat = nntp_data->group;
  /* prefer the standard LIST NEWSGROUPS; fall back to legacy XGTITLE */
  if (nserv->hasLIST_NEWSGROUPS)
    cmd = "LIST NEWSGROUPS";
  else if (nserv->hasXGTITLE)
    cmd = "XGTITLE";
  else
    return 0;

  snprintf(buf, sizeof(buf), "%s %s\r\n", cmd, wildmat);
  int rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), msg, fetch_description, nserv);
  if (rc > 0)
  {
    mutt_error("%s: %s", cmd, buf);
  }
  return rc;
}

/**
 * nntp_parse_xref - Parse cross-reference
 * @param ctx Mailbox
 * @param hdr Email header
 *
 * Walks the Xref: header ("group:number" pairs), updating the read flag per
 * group and setting the article number if it is still unset.
 */
static void nntp_parse_xref(struct Context *ctx, struct Header *hdr)
{
  struct NntpData *nntp_data = ctx->data;

  char *buf =
mutt_str_strdup(hdr->env->xref); /* work on a private copy; tokenizing mutates it */
  char *p = buf;
  while (p)
  {
    anum_t anum;

    /* skip to next word */
    p += strspn(p, " \t");
    char *grp = p;

    /* skip to end of word */
    p = strpbrk(p, " \t");
    if (p)
      *p++ = '\0';

    /* find colon separating "group:number"; skip malformed entries */
    char *colon = strchr(grp, ':');
    if (!colon)
      continue;
    *colon++ = '\0';
    if (sscanf(colon, ANUM, &anum) != 1)
      continue;

    nntp_article_status(ctx, hdr, grp, anum);
    /* remember our own article number from the entry for the current group */
    if (!NHDR(hdr)->article_num && (mutt_str_strcmp(nntp_data->group, grp) == 0))
      NHDR(hdr)->article_num = anum;
  }
  FREE(&buf);
}

/**
 * fetch_tempfile - Write line to temporary file
 * @param line Text to write, or NULL to rewind the file
 * @param data FILE pointer
 * @retval  0 Success
 * @retval -1 Failure
 */
static int fetch_tempfile(char *line, void *data)
{
  FILE *fp = data;

  if (!line)
    rewind(fp);
  else if (fputs(line, fp) == EOF || fputc('\n', fp) == EOF)
    return -1;
  return 0;
}

/**
 * struct FetchCtx - Keep track when getting data from a server
 */
struct FetchCtx
{
  struct Context *ctx;       /* mailbox being filled */
  anum_t first;              /* first article number in the requested range */
  anum_t last;               /* last article number in the requested range */
  int restore;               /* restore messages listed as deleted */
  unsigned char *messages;   /* per-article presence flags, indexed from 'first' */
  struct Progress progress;
#ifdef USE_HCACHE
  header_cache_t *hc;
#endif
};

/**
 * fetch_numbers - Parse article number
 * @param line Article number, or NULL on finalize
 * @param data FetchCtx
 * @retval 0 Always
 *
 * Callback for LISTGROUP output: marks each in-range article as present.
 */
static int fetch_numbers(char *line, void *data)
{
  struct FetchCtx *fc = data;
  anum_t anum;

  if (!line)
    return 0;
  if (sscanf(line, ANUM, &anum) != 1)
    return 0;
  if (anum < fc->first || anum > fc->last)
    return 0;
  fc->messages[anum - fc->first] = 1;
  return 0;
}

/**
 * parse_overview_line - Parse overview line
 * @param line String to parse (tab-separated OVER/XOVER fields)
 * @param data FetchCtx
 * @retval  0 Success
 * @retval -1 Failure
 */
static int parse_overview_line(char *line, void *data)
{
  struct FetchCtx *fc = data;
  struct Context *ctx = fc->ctx;
  struct NntpData *nntp_data = ctx->data;
  struct Header *hdr = NULL;
  char *header = NULL, *field = NULL;
  bool save = true;
  anum_t anum;

  if (!line)
    return 0;

  /* parse article number (first tab-separated field) */
  field = strchr(line, '\t');
  if (field)
    *field++ = '\0';
  if (sscanf(line, ANUM, &anum) != 1)
    return 0;
mutt_debug(2, "" ANUM "\n", anum); /* out of bounds */ if (anum < fc->first || anum > fc->last) return 0; /* not in LISTGROUP */ if (!fc->messages[anum - fc->first]) { /* progress */ if (!ctx->quiet) mutt_progress_update(&fc->progress, anum - fc->first + 1, -1); return 0; } /* convert overview line to header */ FILE *fp = mutt_file_mkstemp(); if (!fp) return -1; header = nntp_data->nserv->overview_fmt; while (field) { char *b = field; if (*header) { if (strstr(header, ":full") == NULL && fputs(header, fp) == EOF) { mutt_file_fclose(&fp); return -1; } header = strchr(header, '\0') + 1; } field = strchr(field, '\t'); if (field) *field++ = '\0'; if (fputs(b, fp) == EOF || fputc('\n', fp) == EOF) { mutt_file_fclose(&fp); return -1; } } rewind(fp); /* allocate memory for headers */ if (ctx->msgcount >= ctx->hdrmax) mx_alloc_memory(ctx); /* parse header */ hdr = ctx->hdrs[ctx->msgcount] = mutt_header_new(); hdr->env = mutt_rfc822_read_header(fp, hdr, 0, 0); hdr->env->newsgroups = mutt_str_strdup(nntp_data->group); hdr->received = hdr->date_sent; mutt_file_fclose(&fp); #ifdef USE_HCACHE if (fc->hc) { char buf[16]; /* try to replace with header from cache */ snprintf(buf, sizeof(buf), "%u", anum); void *hdata = mutt_hcache_fetch(fc->hc, buf, strlen(buf)); if (hdata) { mutt_debug(2, "mutt_hcache_fetch %s\n", buf); mutt_header_free(&hdr); ctx->hdrs[ctx->msgcount] = hdr = mutt_hcache_restore(hdata); mutt_hcache_free(fc->hc, &hdata); hdr->data = 0; hdr->read = false; hdr->old = false; /* skip header marked as deleted in cache */ if (hdr->deleted && !fc->restore) { if (nntp_data->bcache) { mutt_debug(2, "mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } save = false; } } /* not cached yet, store header */ else { mutt_debug(2, "mutt_hcache_store %s\n", buf); mutt_hcache_store(fc->hc, buf, strlen(buf), hdr, 0); } } #endif if (save) { hdr->index = ctx->msgcount++; hdr->read = false; hdr->old = false; hdr->deleted = false; hdr->data = mutt_mem_calloc(1, 
sizeof(struct NntpHeaderData)); NHDR(hdr)->article_num = anum; if (fc->restore) hdr->changed = true; else { nntp_article_status(ctx, hdr, NULL, anum); if (!hdr->read) nntp_parse_xref(ctx, hdr); } if (anum > nntp_data->last_loaded) nntp_data->last_loaded = anum; } else mutt_header_free(&hdr); /* progress */ if (!ctx->quiet) mutt_progress_update(&fc->progress, anum - fc->first + 1, -1); return 0; } /** * nntp_fetch_headers - Fetch headers * @param ctx Mailbox * @param hc Header cache * @param first Number of first header to fetch * @param last Number of last header to fetch * @param restore Restore message listed as deleted * @retval 0 Success * @retval -1 Failure */ static int nntp_fetch_headers(struct Context *ctx, void *hc, anum_t first, anum_t last, int restore) { struct NntpData *nntp_data = ctx->data; struct FetchCtx fc; struct Header *hdr = NULL; char buf[HUGE_STRING]; int rc = 0; int oldmsgcount = ctx->msgcount; anum_t current; anum_t first_over = first; #ifdef USE_HCACHE void *hdata = NULL; #endif /* if empty group or nothing to do */ if (!last || first > last) return 0; /* init fetch context */ fc.ctx = ctx; fc.first = first; fc.last = last; fc.restore = restore; fc.messages = mutt_mem_calloc(last - first + 1, sizeof(unsigned char)); if (fc.messages == NULL) return -1; #ifdef USE_HCACHE fc.hc = hc; #endif /* fetch list of articles */ if (NntpListgroup && nntp_data->nserv->hasLISTGROUP && !nntp_data->deleted) { if (!ctx->quiet) mutt_message(_("Fetching list of articles...")); if (nntp_data->nserv->hasLISTGROUPrange) snprintf(buf, sizeof(buf), "LISTGROUP %s %u-%u\r\n", nntp_data->group, first, last); else snprintf(buf, sizeof(buf), "LISTGROUP %s\r\n", nntp_data->group); rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), NULL, fetch_numbers, &fc); if (rc > 0) { mutt_error("LISTGROUP: %s", buf); } if (rc == 0) { for (current = first; current <= last && rc == 0; current++) { if (fc.messages[current - first]) continue; snprintf(buf, sizeof(buf), "%u", current); 
if (nntp_data->bcache) { mutt_debug(2, "#1 mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } #ifdef USE_HCACHE if (fc.hc) { mutt_debug(2, "mutt_hcache_delete %s\n", buf); mutt_hcache_delete(fc.hc, buf, strlen(buf)); } #endif } } } else { for (current = first; current <= last; current++) fc.messages[current - first] = 1; } /* fetching header from cache or server, or fallback to fetch overview */ if (!ctx->quiet) { mutt_progress_init(&fc.progress, _("Fetching message headers..."), MUTT_PROGRESS_MSG, ReadInc, last - first + 1); } for (current = first; current <= last && rc == 0; current++) { if (!ctx->quiet) mutt_progress_update(&fc.progress, current - first + 1, -1); #ifdef USE_HCACHE snprintf(buf, sizeof(buf), "%u", current); #endif /* delete header from cache that does not exist on server */ if (!fc.messages[current - first]) continue; /* allocate memory for headers */ if (ctx->msgcount >= ctx->hdrmax) mx_alloc_memory(ctx); #ifdef USE_HCACHE /* try to fetch header from cache */ hdata = mutt_hcache_fetch(fc.hc, buf, strlen(buf)); if (hdata) { mutt_debug(2, "mutt_hcache_fetch %s\n", buf); ctx->hdrs[ctx->msgcount] = hdr = mutt_hcache_restore(hdata); mutt_hcache_free(fc.hc, &hdata); hdr->data = 0; /* skip header marked as deleted in cache */ if (hdr->deleted && !restore) { mutt_header_free(&hdr); if (nntp_data->bcache) { mutt_debug(2, "#2 mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } continue; } hdr->read = false; hdr->old = false; } else #endif /* don't try to fetch header from removed newsgroup */ if (nntp_data->deleted) continue; /* fallback to fetch overview */ else if (nntp_data->nserv->hasOVER || nntp_data->nserv->hasXOVER) { if (NntpListgroup && nntp_data->nserv->hasLISTGROUP) break; else continue; } /* fetch header from server */ else { FILE *fp = mutt_file_mkstemp(); if (!fp) { mutt_perror("mutt_file_mkstemp() failed!"); rc = -1; break; } snprintf(buf, sizeof(buf), "HEAD %u\r\n", current); rc = 
nntp_fetch_lines(nntp_data, buf, sizeof(buf), NULL, fetch_tempfile, fp); if (rc) { mutt_file_fclose(&fp); if (rc < 0) break; /* invalid response */ if (mutt_str_strncmp("423", buf, 3) != 0) { mutt_error("HEAD: %s", buf); break; } /* no such article */ if (nntp_data->bcache) { snprintf(buf, sizeof(buf), "%u", current); mutt_debug(2, "#3 mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } rc = 0; continue; } /* parse header */ hdr = ctx->hdrs[ctx->msgcount] = mutt_header_new(); hdr->env = mutt_rfc822_read_header(fp, hdr, 0, 0); hdr->received = hdr->date_sent; mutt_file_fclose(&fp); } /* save header in context */ hdr->index = ctx->msgcount++; hdr->read = false; hdr->old = false; hdr->deleted = false; hdr->data = mutt_mem_calloc(1, sizeof(struct NntpHeaderData)); NHDR(hdr)->article_num = current; if (restore) hdr->changed = true; else { nntp_article_status(ctx, hdr, NULL, NHDR(hdr)->article_num); if (!hdr->read) nntp_parse_xref(ctx, hdr); } if (current > nntp_data->last_loaded) nntp_data->last_loaded = current; first_over = current + 1; } if (!NntpListgroup || !nntp_data->nserv->hasLISTGROUP) current = first_over; /* fetch overview information */ if (current <= last && rc == 0 && !nntp_data->deleted) { char *cmd = nntp_data->nserv->hasOVER ? 
"OVER" : "XOVER"; snprintf(buf, sizeof(buf), "%s %u-%u\r\n", cmd, current, last); rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), NULL, parse_overview_line, &fc); if (rc > 0) { mutt_error("%s: %s", cmd, buf); } } if (ctx->msgcount > oldmsgcount) mx_update_context(ctx, ctx->msgcount - oldmsgcount); FREE(&fc.messages); if (rc != 0) return -1; mutt_clear_error(); return 0; } /** * nntp_mbox_open - Implements MxOps::mbox_open() */ static int nntp_mbox_open(struct Context *ctx) { struct NntpServer *nserv = NULL; struct NntpData *nntp_data = NULL; char buf[HUGE_STRING]; char server[LONG_STRING]; char *group = NULL; int rc; void *hc = NULL; anum_t first, last, count = 0; struct Url url; mutt_str_strfcpy(buf, ctx->path, sizeof(buf)); if (url_parse(&url, buf) < 0 || !url.host || !url.path || !(url.scheme == U_NNTP || url.scheme == U_NNTPS)) { url_free(&url); mutt_error(_("%s is an invalid newsgroup specification!"), ctx->path); return -1; } group = url.path; url.path = strchr(url.path, '\0'); url_tostring(&url, server, sizeof(server), 0); nserv = nntp_select_server(server, true); url_free(&url); if (!nserv) return -1; CurrentNewsSrv = nserv; /* find news group data structure */ nntp_data = mutt_hash_find(nserv->groups_hash, group); if (!nntp_data) { nntp_newsrc_close(nserv); mutt_error(_("Newsgroup %s not found on the server."), group); return -1; } mutt_bit_unset(ctx->rights, MUTT_ACL_INSERT); if (!nntp_data->newsrc_ent && !nntp_data->subscribed && !SaveUnsubscribed) ctx->readonly = true; /* select newsgroup */ mutt_message(_("Selecting %s..."), group); buf[0] = '\0'; if (nntp_query(nntp_data, buf, sizeof(buf)) < 0) { nntp_newsrc_close(nserv); return -1; } /* newsgroup not found, remove it */ if (mutt_str_strncmp("411", buf, 3) == 0) { mutt_error(_("Newsgroup %s has been removed from the server."), nntp_data->group); if (!nntp_data->deleted) { nntp_data->deleted = true; nntp_active_save_cache(nserv); } if (nntp_data->newsrc_ent && !nntp_data->subscribed && 
!SaveUnsubscribed) { FREE(&nntp_data->newsrc_ent); nntp_data->newsrc_len = 0; nntp_delete_group_cache(nntp_data); nntp_newsrc_update(nserv); } } /* parse newsgroup info */ else { if (sscanf(buf, "211 " ANUM " " ANUM " " ANUM, &count, &first, &last) != 3) { nntp_newsrc_close(nserv); mutt_error("GROUP: %s", buf); return -1; } nntp_data->first_message = first; nntp_data->last_message = last; nntp_data->deleted = false; /* get description if empty */ if (NntpLoadDescription && !nntp_data->desc) { if (get_description(nntp_data, NULL, NULL) < 0) { nntp_newsrc_close(nserv); return -1; } if (nntp_data->desc) nntp_active_save_cache(nserv); } } time(&nserv->check_time); ctx->data = nntp_data; if (!nntp_data->bcache && (nntp_data->newsrc_ent || nntp_data->subscribed || SaveUnsubscribed)) nntp_data->bcache = mutt_bcache_open(&nserv->conn->account, nntp_data->group); /* strip off extra articles if adding context is greater than $nntp_context */ first = nntp_data->first_message; if (NntpContext && nntp_data->last_message - first + 1 > NntpContext) first = nntp_data->last_message - NntpContext + 1; nntp_data->last_loaded = first ? 
first - 1 : 0; count = nntp_data->first_message; nntp_data->first_message = first; nntp_bcache_update(nntp_data); nntp_data->first_message = count; #ifdef USE_HCACHE hc = nntp_hcache_open(nntp_data); nntp_hcache_update(nntp_data, hc); #endif if (!hc) { mutt_bit_unset(ctx->rights, MUTT_ACL_WRITE); mutt_bit_unset(ctx->rights, MUTT_ACL_DELETE); } nntp_newsrc_close(nserv); rc = nntp_fetch_headers(ctx, hc, first, nntp_data->last_message, 0); #ifdef USE_HCACHE mutt_hcache_close(hc); #endif if (rc < 0) return -1; nntp_data->last_loaded = nntp_data->last_message; nserv->newsrc_modified = false; return 0; } /** * nntp_msg_open - Implements MxOps::msg_open() */ static int nntp_msg_open(struct Context *ctx, struct Message *msg, int msgno) { struct NntpData *nntp_data = ctx->data; struct Header *hdr = ctx->hdrs[msgno]; char article[16]; /* try to get article from cache */ struct NntpAcache *acache = &nntp_data->acache[hdr->index % NNTP_ACACHE_LEN]; if (acache->path) { if (acache->index == hdr->index) { msg->fp = mutt_file_fopen(acache->path, "r"); if (msg->fp) return 0; } /* clear previous entry */ else { unlink(acache->path); FREE(&acache->path); } } snprintf(article, sizeof(article), "%d", NHDR(hdr)->article_num); msg->fp = mutt_bcache_get(nntp_data->bcache, article); if (msg->fp) { if (NHDR(hdr)->parsed) return 0; } else { char buf[PATH_MAX]; /* don't try to fetch article from removed newsgroup */ if (nntp_data->deleted) return -1; /* create new cache file */ const char *fetch_msg = _("Fetching message..."); mutt_message(fetch_msg); msg->fp = mutt_bcache_put(nntp_data->bcache, article); if (!msg->fp) { mutt_mktemp(buf, sizeof(buf)); acache->path = mutt_str_strdup(buf); acache->index = hdr->index; msg->fp = mutt_file_fopen(acache->path, "w+"); if (!msg->fp) { mutt_perror(acache->path); unlink(acache->path); FREE(&acache->path); return -1; } } /* fetch message to cache file */ snprintf(buf, sizeof(buf), "ARTICLE %s\r\n", NHDR(hdr)->article_num ? 
article : hdr->env->message_id); const int rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), fetch_msg, fetch_tempfile, msg->fp); if (rc) { mutt_file_fclose(&msg->fp); if (acache->path) { unlink(acache->path); FREE(&acache->path); } if (rc > 0) { if (mutt_str_strncmp(NHDR(hdr)->article_num ? "423" : "430", buf, 3) == 0) { mutt_error(_("Article %d not found on the server."), NHDR(hdr)->article_num ? article : hdr->env->message_id); } else mutt_error("ARTICLE: %s", buf); } return -1; } if (!acache->path) mutt_bcache_commit(nntp_data->bcache, article); } /* replace envelope with new one * hash elements must be updated because pointers will be changed */ if (ctx->id_hash && hdr->env->message_id) mutt_hash_delete(ctx->id_hash, hdr->env->message_id, hdr); if (ctx->subj_hash && hdr->env->real_subj) mutt_hash_delete(ctx->subj_hash, hdr->env->real_subj, hdr); mutt_env_free(&hdr->env); hdr->env = mutt_rfc822_read_header(msg->fp, hdr, 0, 0); if (ctx->id_hash && hdr->env->message_id) mutt_hash_insert(ctx->id_hash, hdr->env->message_id, hdr); if (ctx->subj_hash && hdr->env->real_subj) mutt_hash_insert(ctx->subj_hash, hdr->env->real_subj, hdr); /* fix content length */ fseek(msg->fp, 0, SEEK_END); hdr->content->length = ftell(msg->fp) - hdr->content->offset; /* this is called in neomutt before the open which fetches the message, * which is probably wrong, but we just call it again here to handle * the problem instead of fixing it */ NHDR(hdr)->parsed = true; mutt_parse_mime_message(ctx, hdr); /* these would normally be updated in mx_update_context(), but the * full headers aren't parsed with overview, so the information wasn't * available then */ if (WithCrypto) hdr->security = crypt_query(hdr->content); rewind(msg->fp); mutt_clear_error(); return 0; } /** * nntp_msg_close - Implements MxOps::msg_close() * * @note May also return EOF Failure, see errno */ static int nntp_msg_close(struct Context *ctx, struct Message *msg) { return mutt_file_fclose(&msg->fp); } /** * nntp_post - 
Post article
 * @param msg Path of the file containing the article to post
 * @retval  0 Success
 * @retval -1 Failure
 */
int nntp_post(const char *msg)
{
  struct NntpData *nntp_data, nntp_tmp;
  char buf[LONG_STRING];

  /* reuse the open newsgroup's connection if we're in an NNTP mailbox,
   * otherwise connect to the configured news server with a scratch context */
  if (Context && Context->magic == MUTT_NNTP)
    nntp_data = Context->data;
  else
  {
    CurrentNewsSrv = nntp_select_server(NewsServer, false);
    if (!CurrentNewsSrv)
      return -1;

    nntp_data = &nntp_tmp;
    nntp_data->nserv = CurrentNewsSrv;
    nntp_data->group = NULL;
  }

  FILE *fp = mutt_file_fopen(msg, "r");
  if (!fp)
  {
    mutt_perror(msg);
    return -1;
  }

  mutt_str_strfcpy(buf, "POST\r\n", sizeof(buf));
  if (nntp_query(nntp_data, buf, sizeof(buf)) < 0)
  {
    mutt_file_fclose(&fp);
    return -1;
  }
  /* server must answer 3xx (340 "send article") before we transmit */
  if (buf[0] != '3')
  {
    mutt_error(_("Can't post article: %s"), buf);
    mutt_file_fclose(&fp);
    return -1;
  }

  /* buf[0] stays '.' so lines starting with '.' can be dot-stuffed by
   * sending from buf instead of buf + 1 (RFC 3977 byte-stuffing) */
  buf[0] = '.';
  buf[1] = '\0';
  while (fgets(buf + 1, sizeof(buf) - 2, fp))
  {
    size_t len = strlen(buf);
    /* convert LF line ending to CRLF as required by the protocol */
    if (buf[len - 1] == '\n')
    {
      buf[len - 1] = '\r';
      buf[len] = '\n';
      len++;
      buf[len] = '\0';
    }
    if (mutt_socket_send_d(nntp_data->nserv->conn, buf[1] == '.' ? buf : buf + 1,
                           MUTT_SOCK_LOG_HDR) < 0)
    {
      mutt_file_fclose(&fp);
      return nntp_connect_error(nntp_data->nserv);
    }
  }
  mutt_file_fclose(&fp);

  /* terminate an unfinished last line, then send the ".\r\n" end-of-article
   * marker and read the server's verdict */
  if ((buf[strlen(buf) - 1] != '\n' &&
       mutt_socket_send_d(nntp_data->nserv->conn, "\r\n", MUTT_SOCK_LOG_HDR) < 0) ||
      mutt_socket_send_d(nntp_data->nserv->conn, ".\r\n", MUTT_SOCK_LOG_HDR) < 0 ||
      mutt_socket_readln(buf, sizeof(buf), nntp_data->nserv->conn) < 0)
  {
    return nntp_connect_error(nntp_data->nserv);
  }
  if (buf[0] != '2')
  {
    mutt_error(_("Can't post article: %s"), buf);
    return -1;
  }
  return 0;
}

/**
 * nntp_group_poll - Check newsgroup for new articles
 * @param nntp_data   NNTP server data
 * @param update_stat Update the stats?
* @retval  1 New articles found
 * @retval  0 No change
 * @retval -1 Lost connection
 */
static int nntp_group_poll(struct NntpData *nntp_data, int update_stat)
{
  char buf[LONG_STRING] = "";
  anum_t count, first, last;

  /* use GROUP command to poll newsgroup */
  if (nntp_query(nntp_data, buf, sizeof(buf)) < 0)
    return -1;
  if (sscanf(buf, "211 " ANUM " " ANUM " " ANUM, &count, &first, &last) != 3)
    return 0;
  if (first == nntp_data->first_message && last == nntp_data->last_message)
    return 0;

  /* articles have been renumbered */
  if (last < nntp_data->last_message)
  {
    nntp_data->last_cached = 0;
    if (nntp_data->newsrc_len)
    {
      /* collapse the .newsrc ranges to a single empty entry: the old
       * numbering no longer maps onto the server's articles */
      mutt_mem_realloc(&nntp_data->newsrc_ent, sizeof(struct NewsrcEntry));
      nntp_data->newsrc_len = 1;
      nntp_data->newsrc_ent[0].first = 1;
      nntp_data->newsrc_ent[0].last = 0;
    }
  }
  nntp_data->first_message = first;
  nntp_data->last_message = last;
  if (!update_stat)
    return 1;

  /* update counters */
  else if (!last || (!nntp_data->newsrc_ent && !nntp_data->last_cached))
    nntp_data->unread = count;
  else
    nntp_group_unread_stat(nntp_data);
  return 1;
}

/**
 * check_mailbox - Check current newsgroup for new articles
 * @param ctx Mailbox
 * @retval #MUTT_REOPENED Articles have been renumbered or removed from server
 * @retval #MUTT_NEW_MAIL New articles found
 * @retval 0              No change
 * @retval -1             Lost connection
 *
 * Leave newsrc locked
 */
static int check_mailbox(struct Context *ctx)
{
  struct NntpData *nntp_data = ctx->data;
  struct NntpServer *nserv = nntp_data->nserv;
  time_t now = time(NULL);
  int rc, ret = 0;
  void *hc = NULL;

  /* rate-limit polling to once per NntpPoll seconds */
  if (nserv->check_time + NntpPoll > now)
    return 0;

  mutt_message(_("Checking for new messages..."));
  if (nntp_newsrc_parse(nserv) < 0)
    return -1;

  nserv->check_time = now;
  rc = nntp_group_poll(nntp_data, 0);
  if (rc < 0)
  {
    nntp_newsrc_close(nserv);
    return -1;
  }
  if (rc)
    nntp_active_save_cache(nserv);

  /* articles have been renumbered, remove all headers */
  if (nntp_data->last_message < nntp_data->last_loaded)
  {
    for (int i = 0; i < ctx->msgcount; i++)
mutt_header_free(&ctx->hdrs[i]); ctx->msgcount = 0; ctx->tagged = 0; if (nntp_data->last_message < nntp_data->last_loaded) { nntp_data->last_loaded = nntp_data->first_message - 1; if (NntpContext && nntp_data->last_message - nntp_data->last_loaded > NntpContext) nntp_data->last_loaded = nntp_data->last_message - NntpContext; } ret = MUTT_REOPENED; } /* .newsrc has been externally modified */ if (nserv->newsrc_modified) { #ifdef USE_HCACHE unsigned char *messages = NULL; char buf[16]; void *hdata = NULL; struct Header *hdr = NULL; anum_t first = nntp_data->first_message; if (NntpContext && nntp_data->last_message - first + 1 > NntpContext) first = nntp_data->last_message - NntpContext + 1; messages = mutt_mem_calloc(nntp_data->last_loaded - first + 1, sizeof(unsigned char)); hc = nntp_hcache_open(nntp_data); nntp_hcache_update(nntp_data, hc); #endif /* update flags according to .newsrc */ int j = 0; anum_t anum; for (int i = 0; i < ctx->msgcount; i++) { bool flagged = false; anum = NHDR(ctx->hdrs[i])->article_num; #ifdef USE_HCACHE /* check hcache for flagged and deleted flags */ if (hc) { if (anum >= first && anum <= nntp_data->last_loaded) messages[anum - first] = 1; snprintf(buf, sizeof(buf), "%u", anum); hdata = mutt_hcache_fetch(hc, buf, strlen(buf)); if (hdata) { bool deleted; mutt_debug(2, "#1 mutt_hcache_fetch %s\n", buf); hdr = mutt_hcache_restore(hdata); mutt_hcache_free(hc, &hdata); hdr->data = 0; deleted = hdr->deleted; flagged = hdr->flagged; mutt_header_free(&hdr); /* header marked as deleted, removing from context */ if (deleted) { mutt_set_flag(ctx, ctx->hdrs[i], MUTT_TAG, 0); mutt_header_free(&ctx->hdrs[i]); continue; } } } #endif if (!ctx->hdrs[i]->changed) { ctx->hdrs[i]->flagged = flagged; ctx->hdrs[i]->read = false; ctx->hdrs[i]->old = false; nntp_article_status(ctx, ctx->hdrs[i], NULL, anum); if (!ctx->hdrs[i]->read) nntp_parse_xref(ctx, ctx->hdrs[i]); } ctx->hdrs[j++] = ctx->hdrs[i]; } #ifdef USE_HCACHE ctx->msgcount = j; /* restore headers 
without "deleted" flag */ for (anum = first; anum <= nntp_data->last_loaded; anum++) { if (messages[anum - first]) continue; snprintf(buf, sizeof(buf), "%u", anum); hdata = mutt_hcache_fetch(hc, buf, strlen(buf)); if (hdata) { mutt_debug(2, "#2 mutt_hcache_fetch %s\n", buf); if (ctx->msgcount >= ctx->hdrmax) mx_alloc_memory(ctx); ctx->hdrs[ctx->msgcount] = hdr = mutt_hcache_restore(hdata); mutt_hcache_free(hc, &hdata); hdr->data = 0; if (hdr->deleted) { mutt_header_free(&hdr); if (nntp_data->bcache) { mutt_debug(2, "mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } continue; } ctx->msgcount++; hdr->read = false; hdr->old = false; hdr->data = mutt_mem_calloc(1, sizeof(struct NntpHeaderData)); NHDR(hdr)->article_num = anum; nntp_article_status(ctx, hdr, NULL, anum); if (!hdr->read) nntp_parse_xref(ctx, hdr); } } FREE(&messages); #endif nserv->newsrc_modified = false; ret = MUTT_REOPENED; } /* some headers were removed, context must be updated */ if (ret == MUTT_REOPENED) { if (ctx->subj_hash) mutt_hash_destroy(&ctx->subj_hash); if (ctx->id_hash) mutt_hash_destroy(&ctx->id_hash); mutt_clear_threads(ctx); ctx->vcount = 0; ctx->deleted = 0; ctx->new = 0; ctx->unread = 0; ctx->flagged = 0; ctx->changed = false; ctx->id_hash = NULL; ctx->subj_hash = NULL; mx_update_context(ctx, ctx->msgcount); } /* fetch headers of new articles */ if (nntp_data->last_message > nntp_data->last_loaded) { int oldmsgcount = ctx->msgcount; bool quiet = ctx->quiet; ctx->quiet = true; #ifdef USE_HCACHE if (!hc) { hc = nntp_hcache_open(nntp_data); nntp_hcache_update(nntp_data, hc); } #endif rc = nntp_fetch_headers(ctx, hc, nntp_data->last_loaded + 1, nntp_data->last_message, 0); ctx->quiet = quiet; if (rc >= 0) nntp_data->last_loaded = nntp_data->last_message; if (ret == 0 && ctx->msgcount > oldmsgcount) ret = MUTT_NEW_MAIL; } #ifdef USE_HCACHE mutt_hcache_close(hc); #endif if (ret) nntp_newsrc_close(nserv); mutt_clear_error(); return ret; } /** * nntp_mbox_check - 
 Implements MxOps::mbox_check()
 * @param ctx        Mailbox
 * @param index_hint Current message (UNUSED)
 * @retval #MUTT_REOPENED Articles have been renumbered or removed from server
 * @retval #MUTT_NEW_MAIL New articles found
 * @retval 0 No change
 * @retval -1 Lost connection
 */
static int nntp_mbox_check(struct Context *ctx, int *index_hint)
{
  int ret = check_mailbox(ctx);
  /* check_mailbox() leaves the newsrc locked only when nothing changed
   * (ret == 0); release it in that case */
  if (ret == 0)
  {
    struct NntpData *nntp_data = ctx->data;
    struct NntpServer *nserv = nntp_data->nserv;
    nntp_newsrc_close(nserv);
  }
  return ret;
}

/**
 * nntp_mbox_sync - Implements MxOps::mbox_sync()
 *
 * @note May also return values from check_mailbox()
 */
static int nntp_mbox_sync(struct Context *ctx, int *index_hint)
{
  struct NntpData *nntp_data = ctx->data;
  int rc;
#ifdef USE_HCACHE
  header_cache_t *hc = NULL;
#endif

  /* check for new articles */
  nntp_data->nserv->check_time = 0;
  rc = check_mailbox(ctx);
  if (rc)
    return rc;

#ifdef USE_HCACHE
  nntp_data->last_cached = 0;
  hc = nntp_hcache_open(nntp_data);
#endif

  for (int i = 0; i < ctx->msgcount; i++)
  {
    struct Header *hdr = ctx->hdrs[i];
    char buf[16];

    /* NOTE(review): article_num is anum_t (unsigned); "%u" would be the
     * matching conversion here — confirm against anum_t's definition */
    snprintf(buf, sizeof(buf), "%d", NHDR(hdr)->article_num);
    /* drop cached bodies of deleted articles */
    if (nntp_data->bcache && hdr->deleted)
    {
      mutt_debug(2, "mutt_bcache_del %s\n", buf);
      mutt_bcache_del(nntp_data->bcache, buf);
    }
#ifdef USE_HCACHE
    /* persist changed/deleted flags into the header cache */
    if (hc && (hdr->changed || hdr->deleted))
    {
      if (hdr->deleted && !hdr->read)
        nntp_data->unread--;
      mutt_debug(2, "mutt_hcache_store %s\n", buf);
      mutt_hcache_store(hc, buf, strlen(buf), hdr, 0);
    }
#endif
  }

#ifdef USE_HCACHE
  if (hc)
  {
    mutt_hcache_close(hc);
    nntp_data->last_cached = nntp_data->last_loaded;
  }
#endif

  /* save .newsrc entries */
  nntp_newsrc_gen_entries(ctx);
  nntp_newsrc_update(nntp_data->nserv);
  nntp_newsrc_close(nntp_data->nserv);
  return 0;
}

/**
 * nntp_mbox_close - Implements MxOps::mbox_close()
 * @retval 0 Always
 */
static int nntp_mbox_close(struct Context *ctx)
{
  struct NntpData *nntp_data = ctx->data, *nntp_tmp = NULL;

  if (!nntp_data)
    return 0;

  nntp_data->unread = ctx->unread;

  nntp_acache_free(nntp_data);
  if (!nntp_data->nserv || !nntp_data->nserv->groups_hash || !nntp_data->group)
    return 0;

  /* only free the data if it is not the server's canonical entry for
   * this group (the hash owns that one) */
  nntp_tmp = mutt_hash_find(nntp_data->nserv->groups_hash, nntp_data->group);
  if (nntp_tmp == NULL || nntp_tmp != nntp_data)
    nntp_data_free(nntp_data);
  return 0;
}

/**
 * nntp_date - Get date and time from server
 * @param nserv NNTP server
 * @param now   Server time
 * @retval  0 Success
 * @retval -1 Failure
 */
static int nntp_date(struct NntpServer *nserv, time_t *now)
{
  if (nserv->hasDATE)
  {
    struct NntpData nntp_data;
    char buf[LONG_STRING];
    struct tm tm;

    memset(&tm, 0, sizeof(tm));
    nntp_data.nserv = nserv;
    nntp_data.group = NULL;
    mutt_str_strfcpy(buf, "DATE\r\n", sizeof(buf));
    if (nntp_query(&nntp_data, buf, sizeof(buf)) < 0)
      return -1;

    /* reply is "111 yyyymmddhhmmss" (RFC 3977) */
    if (sscanf(buf, "111 %4d%2d%2d%2d%2d%2d%*s", &tm.tm_year, &tm.tm_mon,
               &tm.tm_mday, &tm.tm_hour, &tm.tm_min, &tm.tm_sec) == 6)
    {
      tm.tm_year -= 1900;
      tm.tm_mon--;
      *now = timegm(&tm);
      if (*now >= 0)
      {
        mutt_debug(1, "server time is %lu\n", *now);
        return 0;
      }
    }
  }
  /* fall back to local time if DATE is unsupported or unparsable */
  time(now);
  return 0;
}

/**
 * nntp_active_fetch - Fetch list of all newsgroups from server
 * @param nserv NNTP server
 * @param new   Mark the groups as new
 * @retval  0 Success
 * @retval -1 Failure
 */
int nntp_active_fetch(struct NntpServer *nserv, bool new)
{
  struct NntpData nntp_data;
  char msg[STRING];
  char buf[LONG_STRING];
  unsigned int i;
  int rc;

  snprintf(msg, sizeof(msg), _("Loading list of groups from server %s..."),
           nserv->conn->account.host);
  mutt_message(msg);
  if (nntp_date(nserv, &nserv->newgroups_time) < 0)
    return -1;

  nntp_data.nserv = nserv;
  nntp_data.group = NULL;
  /* remember where the group list ended; entries added by LIST start here */
  i = nserv->groups_num;
  mutt_str_strfcpy(buf, "LIST\r\n", sizeof(buf));
  rc = nntp_fetch_lines(&nntp_data, buf, sizeof(buf), msg, nntp_add_group, nserv);
  if (rc)
  {
    if (rc > 0)
    {
      mutt_error("LIST: %s", buf);
    }
    return -1;
  }

  if (new)
  {
    for (; i < nserv->groups_num; i++)
    {
      struct NntpData *data = nserv->groups_list[i];
      data->new = true;
    }
  }

  /* purge groups deleted on the server that have no .newsrc state */
  for (i = 0; i < nserv->groups_num; i++)
  {
    struct NntpData *data = nserv->groups_list[i];

    if (data && data->deleted && !data->newsrc_ent)
    {
      nntp_delete_group_cache(data);
      mutt_hash_delete(nserv->groups_hash, data->group, NULL);
      nserv->groups_list[i] = NULL;
    }
  }

  if (NntpLoadDescription)
    rc = get_description(&nntp_data, "*", _("Loading descriptions..."));

  nntp_active_save_cache(nserv);
  if (rc < 0)
    return -1;

  mutt_clear_error();
  return 0;
}

/**
 * nntp_check_new_groups - Check for new groups/articles in subscribed groups
 * @param nserv NNTP server
 * @retval  1 New groups found
 * @retval  0 No new groups
 * @retval -1 Error
 */
int nntp_check_new_groups(struct NntpServer *nserv)
{
  struct NntpData nntp_data;
  time_t now;
  struct tm *tm = NULL;
  char buf[LONG_STRING];
  char *msg = _("Checking for new newsgroups...");
  unsigned int i;
  int rc, update_active = false;

  if (!nserv || !nserv->newgroups_time)
    return -1;

  /* check subscribed newsgroups for new articles */
  if (ShowNewNews)
  {
    mutt_message(_("Checking for new messages..."));
    for (i = 0; i < nserv->groups_num; i++)
    {
      struct NntpData *data = nserv->groups_list[i];

      if (data && data->subscribed)
      {
        rc = nntp_group_poll(data, 1);
        if (rc < 0)
          return -1;
        if (rc > 0)
          update_active = true;
      }
    }
    /* select current newsgroup */
    if (Context && Context->magic == MUTT_NNTP)
    {
      buf[0] = '\0';
      if (nntp_query((struct NntpData *) Context->data, buf, sizeof(buf)) < 0)
        return -1;
    }
  }
  else if (nserv->newgroups_time)
    return 0;

  /* get list of new groups */
  mutt_message(msg);
  if (nntp_date(nserv, &now) < 0)
    return -1;
  nntp_data.nserv = nserv;
  if (Context && Context->magic == MUTT_NNTP)
    nntp_data.group = ((struct NntpData *) Context->data)->group;
  else
    nntp_data.group = NULL;
  i = nserv->groups_num;
  tm = gmtime(&nserv->newgroups_time);
  snprintf(buf, sizeof(buf), "NEWGROUPS %02d%02d%02d %02d%02d%02d GMT\r\n",
           tm->tm_year % 100, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour,
           tm->tm_min, tm->tm_sec);
  rc = nntp_fetch_lines(&nntp_data, buf, sizeof(buf), msg, nntp_add_group, nserv);
  if (rc)
  {
    if (rc > 0)
    {
      mutt_error("NEWGROUPS: %s", buf);
    }
    return -1;
  }

  /* new groups found */
  rc = 0;
  if (nserv->groups_num != i)
  {
    int groups_num = i;

    nserv->newgroups_time = now;
    for (; i < nserv->groups_num; i++)
    {
      struct NntpData *data = nserv->groups_list[i];
      data->new = true;
    }

    /* loading descriptions */
    if (NntpLoadDescription)
    {
      unsigned int count = 0;
      struct Progress progress;

      mutt_progress_init(&progress, _("Loading descriptions..."),
                         MUTT_PROGRESS_MSG, ReadInc, nserv->groups_num - i);
      for (i = groups_num; i < nserv->groups_num; i++)
      {
        struct NntpData *data = nserv->groups_list[i];

        if (get_description(data, NULL, NULL) < 0)
          return -1;
        mutt_progress_update(&progress, ++count, -1);
      }
    }
    update_active = true;
    rc = 1;
  }
  if (update_active)
    nntp_active_save_cache(nserv);
  mutt_clear_error();
  return rc;
}

/**
 * nntp_check_msgid - Fetch article by Message-ID
 * @param ctx   Mailbox
 * @param msgid Message ID
 * @retval  0 Success
 * @retval  1 No such article
 * @retval -1 Error
 */
int nntp_check_msgid(struct Context *ctx, const char *msgid)
{
  struct NntpData *nntp_data = ctx->data;
  char buf[LONG_STRING];

  FILE *fp = mutt_file_mkstemp();
  if (!fp)
  {
    mutt_perror("mutt_file_mkstemp() failed!");
    return -1;
  }

  snprintf(buf, sizeof(buf), "HEAD %s\r\n", msgid);
  int rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), NULL, fetch_tempfile, fp);
  if (rc)
  {
    mutt_file_fclose(&fp);
    if (rc < 0)
      return -1;
    /* "430" = no article with that message-id (RFC 3977) */
    if (mutt_str_strncmp("430", buf, 3) == 0)
      return 1;
    mutt_error("HEAD: %s", buf);
    return -1;
  }

  /* parse header */
  if (ctx->msgcount == ctx->hdrmax)
    mx_alloc_memory(ctx);
  struct Header *hdr = ctx->hdrs[ctx->msgcount] = mutt_header_new();
  hdr->data = mutt_mem_calloc(1, sizeof(struct NntpHeaderData));
  hdr->env = mutt_rfc822_read_header(fp, hdr, 0, 0);
  mutt_file_fclose(&fp);

  /* get article number */
  if (hdr->env->xref)
    nntp_parse_xref(ctx, hdr);
  else
  {
    snprintf(buf, sizeof(buf), "STAT %s\r\n", msgid);
    if (nntp_query(nntp_data, buf, sizeof(buf)) < 0)
    {
      mutt_header_free(&hdr);
      return -1;
    }
    /* STAT reply: "223 n message-id ..." — number starts at offset 4 */
    sscanf(buf + 4, ANUM, &NHDR(hdr)->article_num);
  }

  /* reset flags */
  hdr->read = false;
  hdr->old = false;
  hdr->deleted = false;
  hdr->changed = true;
  hdr->received = hdr->date_sent;
  hdr->index = ctx->msgcount++;
  mx_update_context(ctx, 1);
  return 0;
}

/**
 * struct ChildCtx - Keep track of the children of an article
 */
struct ChildCtx
{
  struct Context *ctx;   /* mailbox being searched */
  unsigned int num;      /* number of child article numbers collected */
  unsigned int max;      /* capacity of the child array */
  anum_t *child;         /* growable array of child article numbers */
};

/**
 * fetch_children - Parse XPAT line
 * @param line String to parse
 * @param data ChildCtx
 * @retval 0 Always
 */
static int fetch_children(char *line, void *data)
{
  struct ChildCtx *cc = data;
  anum_t anum;

  if (!line || sscanf(line, ANUM, &anum) != 1)
    return 0;
  /* skip articles already present in the context */
  for (unsigned int i = 0; i < cc->ctx->msgcount; i++)
    if (NHDR(cc->ctx->hdrs[i])->article_num == anum)
      return 0;
  /* grow the array geometrically when full */
  if (cc->num >= cc->max)
  {
    cc->max *= 2;
    mutt_mem_realloc(&cc->child, sizeof(anum_t) * cc->max);
  }
  cc->child[cc->num++] = anum;
  return 0;
}

/**
 * nntp_check_children - Fetch children of article with the Message-ID
 * @param ctx   Mailbox
 * @param msgid Message ID to find
 * @retval  0 Success
 * @retval -1 Failure
 */
int nntp_check_children(struct Context *ctx, const char *msgid)
{
  struct NntpData *nntp_data = ctx->data;
  struct ChildCtx cc;
  char buf[STRING];
  int rc;
  bool quiet;
  void *hc = NULL;

  if (!nntp_data || !nntp_data->nserv)
    return -1;
  if (nntp_data->first_message > nntp_data->last_loaded)
    return 0;

  /* init context */
  cc.ctx = ctx;
  cc.num = 0;
  cc.max = 10;
  cc.child = mutt_mem_malloc(sizeof(anum_t) * cc.max);

  /* fetch numbers of child messages */
  snprintf(buf, sizeof(buf), "XPAT References %u-%u *%s*\r\n",
           nntp_data->first_message, nntp_data->last_loaded, msgid);
  rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), NULL, fetch_children, &cc);
  if (rc)
  {
    FREE(&cc.child);
    if (rc > 0)
    {
      if (mutt_str_strncmp("500", buf, 3) != 0)
        mutt_error("XPAT: %s", buf);
      else
      {
        mutt_error(_("Unable to find child articles because server does not "
                     "support XPAT command."));
      }
    }
    return -1;
  }

  /* fetch all found messages */
  quiet = ctx->quiet;
  ctx->quiet = true;
#ifdef USE_HCACHE
  hc = nntp_hcache_open(nntp_data);
#endif
  for (int i = 0; i < cc.num; i++)
  {
    rc = nntp_fetch_headers(ctx, hc, cc.child[i], cc.child[i], 1);
    if (rc < 0)
      break;
  }
#ifdef USE_HCACHE
  mutt_hcache_close(hc);
#endif
  ctx->quiet = quiet;
  FREE(&cc.child);
  return (rc < 0) ? -1 : 0;
}

// clang-format off
/**
 * struct mx_nntp_ops - Mailbox callback functions for NNTP mailboxes
 */
struct MxOps mx_nntp_ops = {
  .mbox_open        = nntp_mbox_open,
  .mbox_open_append = NULL,
  .mbox_check       = nntp_mbox_check,
  .mbox_sync        = nntp_mbox_sync,
  .mbox_close       = nntp_mbox_close,
  .msg_open         = nntp_msg_open,
  .msg_open_new     = NULL,
  .msg_commit       = NULL,
  .msg_close        = nntp_msg_close,
  .tags_edit        = NULL,
  .tags_commit      = NULL,
};
// clang-format on
./CrossVul/dataset_final_sorted/CWE-20/c/good_254_0
crossvul-cpp_data_bad_5649_0
/****************************************************************************** * * Back-end of the driver for virtual block devices. This portion of the * driver exports a 'unified' block-device interface that can be accessed * by any operating system that implements a compatible front end. A * reference front-end implementation can be found in: * drivers/block/xen-blkfront.c * * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Copyright (c) 2005, Christopher Clark * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */
static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total numbers of grants that can be used
 * to fill the ring, but since this might become too high, specially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using a LRU
 * algorithm.
 */
static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percent number of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10

/*
 * Take one page from the per-backend free-page pool, or balloon in a fresh
 * one when the pool is empty. Returns 0 on success or the error from
 * alloc_xenballooned_pages().
 */
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	if (list_empty(&blkif->free_pages)) {
		BUG_ON(blkif->free_pages_num != 0);
		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
		return alloc_xenballooned_pages(1, page, false);
	}
	BUG_ON(blkif->free_pages_num == 0);
	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	blkif->free_pages_num--;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

	return 0;
}

/* Return @num pages to the per-backend free-page pool. */
static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &blkif->free_pages);
	blkif->free_pages_num += num;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

/*
 * Shrink the free-page pool down to at most @num pages, returning the
 * excess to the balloon. The lock is dropped around each batched
 * free_xenballooned_pages() call.
 */
static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	while (blkif->free_pages_num > num) {
		BUG_ON(list_empty(&blkif->free_pages));
		page[num_pages] = list_first_entry(&blkif->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		blkif->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
			free_xenballooned_pages(num_pages, page);
			spin_lock_irqsave(&blkif->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
	if (num_pages != 0)
		free_xenballooned_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

/* Deletion-safe in-order traversal of a persistent-grant rbtree. */
#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single-thread for each backed, so we
 * can be sure that this functions will never be called recursively.
 *
 * The only exception to that is put_persistent_grant, that can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;

	/* refuse once the configured cap is reached; remember the overflow */
	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put new node */
	new = &blkif->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
	blkif->persistent_gnt_c++;
	atomic_inc(&blkif->persistent_gnt_in_use);
	return 0;
}

/*
 * Look up @gref in the persistent-grant tree and mark it ACTIVE.
 * Returns NULL if not found or already in use by another request.
 */
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = blkif->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
			atomic_inc(&blkif->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

/*
 * Release a grant obtained via get_persistent_gnt(). Safe from
 * interrupt context (atomic bitops only).
 */
static void put_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
	if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
	          pr_alert_ratelimited(DRV_PFX " freeing a grant already unused");
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	atomic_dec(&blkif->persistent_gnt_in_use);
}

/*
 * Unmap and free every grant in @root; @num must equal the number of
 * entries in the tree. Unmaps are batched per hypercall.
 */
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int ret = 0;
	int segs_to_unmap = 0;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

/*
 * Deferred-work handler: unmap and free the grants queued on
 * blkif->persistent_purge_list by purge_persistent_gnt().
 */
static void unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int ret, segs_to_unmap = 0;
	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

	while(!list_empty(&blkif->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
		                                  struct persistent_gnt,
		                                  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, pages,
				segs_to_unmap);
			BUG_ON(ret);
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
		BUG_ON(ret);
		put_free_pages(blkif, pages, segs_to_unmap);
	}
}

/*
 * LRU cleanup of the persistent-grant tree: when the tree is at (or over)
 * the cap, detach a batch of unused grants onto persistent_purge_list and
 * schedule unmap_purged_grants() to unmap them.
 */
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false;
	struct rb_root *root;

	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !blkif->vbd.overflow_max_grants)) {
		return;
	}

	if (work_pending(&blkif->persistent_purge_work)) {
		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
		return;
	}

	/* target: bring the tree back under the cap plus LRU_PERCENT_CLEAN% */
	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(blkif->persistent_gnt_c, num_clean);
	if (num_clean >
	    (blkif->persistent_gnt_c -
	    atomic_read(&blkif->persistent_gnt_in_use)))
		return;

	/*
	 * At this point, we can assure that there will be no calls
         * to get_persistent_grant (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
	 */

	total = num_clean;
	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

	INIT_LIST_HEAD(&blkif->persistent_purge_list);
	root = &blkif->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		/* first pass only evicts grants untouched since last purge */
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
		         &blkif->persistent_purge_list);
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since last purge in order to cope
	 * with the requested num
	 */
	if (!scan_used) {
		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
		scan_used = true;
		goto purge_list;
	}
finished:
	/* Remove the "used" flag from all the persistent grants */
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	}
	blkif->persistent_gnt_c -= (total - num_clean);
	blkif->vbd.overflow_max_grants = 0;

	/* We can defer this work */
	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
	schedule_work(&blkif->persistent_purge_work);
	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
	return;
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	if (!list_empty(&blkif->pending_free)) {
		req = list_entry(blkif->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free page.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	was_empty = list_empty(&blkif->pending_free);
	list_add(&req->free_list, &blkif->pending_free);
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		/* reject wrap-around and past-end-of-device requests */
		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

/*
 * Propagate a changed backing-device size to the frontend via xenstore
 * ("sectors" node), inside a retried xenbus transaction.
 */
static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

/* Dump per-backend I/O counters and reset most of them.
 * NOTE(review): st_f_req is printed but not reset here, unlike the other
 * counters — confirm this asymmetry is intentional. */
static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		 " | ds %4llu | pg: %4u/%4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req,
		 blkif->persistent_gnt_c,
		 xen_blkif_max_pgrants);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}

/*
 * Per-backend kernel thread: waits for ring notifications (and free
 * pending_reqs), dispatches I/O, and periodically runs the persistent-grant
 * LRU purge, shrinks the free-page pool and logs stats.
 */
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			blkif->pending_free_wq,
			!list_empty(&blkif->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, blkif->next_lru)) {
			purge_persistent_gnt(blkif);
			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(blkif, 0 /* All */);

	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
		free_persistent_gnts(blkif, &blkif->persistent_gnts,
			blkif->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
	blkif->persistent_gnt_c = 0;

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
                            struct grant_page *pages[],
                            int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	int ret;

	for (i = 0; i < num; i++) {
		/* persistent grants stay mapped; just drop the ACTIVE bit */
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
			                        invcount);
			BUG_ON(ret);
			put_free_pages(blkif, unmap_pages, invcount);
			invcount = 0;
		}
	}
	if (invcount) {
		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
		BUG_ON(ret);
		put_free_pages(blkif, unmap_pages, invcount);
	}
}

/*
 * Map the frontend's grant references for one request. Grants already in
 * the persistent tree are reused; new ones are mapped (batched per
 * hypercall) and, when there is room, promoted to persistent. Returns
 * non-zero if any grant failed to map.
 */
static int xen_blkbk_map(struct xen_blkif *blkif,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with proper amount of sectors, and setup
	 * assign map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts)
			persistent_gnt = get_persistent_gnt(
				blkif,
				pages[i]->gref);

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(blkif, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			/* grant was reused from the persistent tree */
			continue;
		}
		if (use_persistent_gnts &&
		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
				                 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistenly
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(blkif,
			                       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, blkif->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts &&
		    !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
			         blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
	put_free_pages(blkif, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}

/* Map the segment pages of a pending request; writable unless it's a read. */
static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
			   pending_req->nr_pages,
	                   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}

/* NOTE: definition continues beyond this file chunk (truncated). */
static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif *blkif = pending_req->blkif;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment_aligned *segments = NULL;

	nseg = pending_req->nr_pages;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref =
req->u.indirect.indirect_grefs[i]; rc = xen_blkbk_map(blkif, pages, indirect_grefs, true); if (rc) goto unmap; for (n = 0, i = 0; n < nseg; n++) { if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { /* Map indirect segments */ if (segments) kunmap_atomic(segments); segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); } i = n % SEGS_PER_INDIRECT_FRAME; pending_req->segments[n]->gref = segments[i].gref; seg[n].nsec = segments[i].last_sect - segments[i].first_sect + 1; seg[n].offset = (segments[i].first_sect << 9); if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) || (segments[i].last_sect < segments[i].first_sect)) { rc = -EINVAL; goto unmap; } preq->nr_sects += seg[n].nsec; } unmap: if (segments) kunmap_atomic(segments); xen_blkbk_unmap(blkif, pages, indirect_grefs); return rc; } static int dispatch_discard_io(struct xen_blkif *blkif, struct blkif_request *req) { int err = 0; int status = BLKIF_RSP_OKAY; struct block_device *bdev = blkif->vbd.bdev; unsigned long secure; blkif->st_ds_req++; xen_blkif_get(blkif); secure = (blkif->vbd.discard_secure && (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? BLKDEV_DISCARD_SECURE : 0; err = blkdev_issue_discard(bdev, req->u.discard.sector_number, req->u.discard.nr_sectors, GFP_KERNEL, secure); if (err == -EOPNOTSUPP) { pr_debug(DRV_PFX "discard op failed, not supported\n"); status = BLKIF_RSP_EOPNOTSUPP; } else if (err) status = BLKIF_RSP_ERROR; make_response(blkif, req->u.discard.id, req->operation, status); xen_blkif_put(blkif); return err; } static int dispatch_other_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req) { free_req(blkif, pending_req); make_response(blkif, req->u.other.id, req->operation, BLKIF_RSP_EOPNOTSUPP); return -EIO; } static void xen_blk_drain_io(struct xen_blkif *blkif) { atomic_set(&blkif->drain, 1); do { /* The initial value is one, and one refcnt taken at the * start of the xen_blkif_schedule thread. 
*/ if (atomic_read(&blkif->refcnt) <= 2) break; wait_for_completion_interruptible_timeout( &blkif->drain_complete, HZ); if (!atomic_read(&blkif->drain)) break; } while (!kthread_should_stop()); atomic_set(&blkif->drain, 0); } /* * Completion callback on the bio's. Called as bh->b_end_io() */ static void __end_block_io_op(struct pending_req *pending_req, int error) { /* An error fails the entire request. */ if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) && (error == -EOPNOTSUPP)) { pr_debug(DRV_PFX "flush diskcache op failed, not supported\n"); xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0); pending_req->status = BLKIF_RSP_EOPNOTSUPP; } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) && (error == -EOPNOTSUPP)) { pr_debug(DRV_PFX "write barrier op failed, not supported\n"); xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0); pending_req->status = BLKIF_RSP_EOPNOTSUPP; } else if (error) { pr_debug(DRV_PFX "Buffer not up-to-date at end of operation," " error=%d\n", error); pending_req->status = BLKIF_RSP_ERROR; } /* * If all of the bio's have completed it is time to unmap * the grant references associated with 'request' and provide * the proper response on the ring. */ if (atomic_dec_and_test(&pending_req->pendcnt)) { xen_blkbk_unmap(pending_req->blkif, pending_req->segments, pending_req->nr_pages); make_response(pending_req->blkif, pending_req->id, pending_req->operation, pending_req->status); xen_blkif_put(pending_req->blkif); if (atomic_read(&pending_req->blkif->refcnt) <= 2) { if (atomic_read(&pending_req->blkif->drain)) complete(&pending_req->blkif->drain_complete); } free_req(pending_req->blkif, pending_req); } } /* * bio callback. 
*/ static void end_block_io_op(struct bio *bio, int error) { __end_block_io_op(bio->bi_private, error); bio_put(bio); } /* * Function to copy the from the ring buffer the 'struct blkif_request' * (which has the sectors we want, number of them, grant references, etc), * and transmute it to the block API to hand it over to the proper block disk. */ static int __do_block_io_op(struct xen_blkif *blkif) { union blkif_back_rings *blk_rings = &blkif->blk_rings; struct blkif_request req; struct pending_req *pending_req; RING_IDX rc, rp; int more_to_do = 0; rc = blk_rings->common.req_cons; rp = blk_rings->common.sring->req_prod; rmb(); /* Ensure we see queued requests up to 'rp'. */ while (rc != rp) { if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) break; if (kthread_should_stop()) { more_to_do = 1; break; } pending_req = alloc_req(blkif); if (NULL == pending_req) { blkif->st_oo_req++; more_to_do = 1; break; } switch (blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req)); break; case BLKIF_PROTOCOL_X86_32: blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc)); break; case BLKIF_PROTOCOL_X86_64: blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc)); break; default: BUG(); } blk_rings->common.req_cons = ++rc; /* before make_response() */ /* Apply all sanity checks to /private copy/ of request. */ barrier(); switch (req.operation) { case BLKIF_OP_READ: case BLKIF_OP_WRITE: case BLKIF_OP_WRITE_BARRIER: case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_INDIRECT: if (dispatch_rw_block_io(blkif, &req, pending_req)) goto done; break; case BLKIF_OP_DISCARD: free_req(blkif, pending_req); if (dispatch_discard_io(blkif, &req)) goto done; break; default: if (dispatch_other_io(blkif, &req, pending_req)) goto done; break; } /* Yield point for this unbounded loop. 
*/ cond_resched(); } done: return more_to_do; } static int do_block_io_op(struct xen_blkif *blkif) { union blkif_back_rings *blk_rings = &blkif->blk_rings; int more_to_do; do { more_to_do = __do_block_io_op(blkif); if (more_to_do) break; RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do); } while (more_to_do); return more_to_do; } /* * Transmutation of the 'struct blkif_request' to a proper 'struct bio' * and call the 'submit_bio' to pass it to the underlying storage. */ static int dispatch_rw_block_io(struct xen_blkif *blkif, struct blkif_request *req, struct pending_req *pending_req) { struct phys_req preq; struct seg_buf *seg = pending_req->seg; unsigned int nseg; struct bio *bio = NULL; struct bio **biolist = pending_req->biolist; int i, nbio = 0; int operation; struct blk_plug plug; bool drain = false; struct grant_page **pages = pending_req->segments; unsigned short req_operation; req_operation = req->operation == BLKIF_OP_INDIRECT ? req->u.indirect.indirect_op : req->operation; if ((req->operation == BLKIF_OP_INDIRECT) && (req_operation != BLKIF_OP_READ) && (req_operation != BLKIF_OP_WRITE)) { pr_debug(DRV_PFX "Invalid indirect operation (%u)\n", req_operation); goto fail_response; } switch (req_operation) { case BLKIF_OP_READ: blkif->st_rd_req++; operation = READ; break; case BLKIF_OP_WRITE: blkif->st_wr_req++; operation = WRITE_ODIRECT; break; case BLKIF_OP_WRITE_BARRIER: drain = true; case BLKIF_OP_FLUSH_DISKCACHE: blkif->st_f_req++; operation = WRITE_FLUSH; break; default: operation = 0; /* make gcc happy */ goto fail_response; break; } /* Check that the number of segments is sane. */ nseg = req->operation == BLKIF_OP_INDIRECT ? 
req->u.indirect.nr_segments : req->u.rw.nr_segments; if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || unlikely((req->operation != BLKIF_OP_INDIRECT) && (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || unlikely((req->operation == BLKIF_OP_INDIRECT) && (nseg > MAX_INDIRECT_SEGMENTS))) { pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", nseg); /* Haven't submitted any bio's yet. */ goto fail_response; } preq.nr_sects = 0; pending_req->blkif = blkif; pending_req->id = req->u.rw.id; pending_req->operation = req_operation; pending_req->status = BLKIF_RSP_OKAY; pending_req->nr_pages = nseg; if (req->operation != BLKIF_OP_INDIRECT) { preq.dev = req->u.rw.handle; preq.sector_number = req->u.rw.sector_number; for (i = 0; i < nseg; i++) { pages[i]->gref = req->u.rw.seg[i].gref; seg[i].nsec = req->u.rw.seg[i].last_sect - req->u.rw.seg[i].first_sect + 1; seg[i].offset = (req->u.rw.seg[i].first_sect << 9); if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) goto fail_response; preq.nr_sects += seg[i].nsec; } } else { preq.dev = req->u.indirect.handle; preq.sector_number = req->u.indirect.sector_number; if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq)) goto fail_response; } if (xen_vbd_translate(&preq, blkif, operation) != 0) { pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", operation == READ ? "read" : "write", preq.sector_number, preq.sector_number + preq.nr_sects, blkif->vbd.pdevice); goto fail_response; } /* * This check _MUST_ be done after xen_vbd_translate as the preq.bdev * is set there. */ for (i = 0; i < nseg; i++) { if (((int)preq.sector_number|(int)seg[i].nsec) & ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) { pr_debug(DRV_PFX "Misaligned I/O request from domain %d", blkif->domid); goto fail_response; } } /* Wait on all outstanding I/O's and once that has been completed * issue the WRITE_FLUSH. 
*/ if (drain) xen_blk_drain_io(pending_req->blkif); /* * If we have failed at this point, we need to undo the M2P override, * set gnttab_set_unmap_op on all of the grant references and perform * the hypercall to unmap the grants - that is all done in * xen_blkbk_unmap. */ if (xen_blkbk_map_seg(pending_req)) goto fail_flush; /* * This corresponding xen_blkif_put is done in __end_block_io_op, or * below (in "!bio") if we are handling a BLKIF_OP_DISCARD. */ xen_blkif_get(blkif); for (i = 0; i < nseg; i++) { while ((bio == NULL) || (bio_add_page(bio, pages[i]->page, seg[i].nsec << 9, seg[i].offset) == 0)) { bio = bio_alloc(GFP_KERNEL, nseg-i); if (unlikely(bio == NULL)) goto fail_put_bio; biolist[nbio++] = bio; bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; bio->bi_sector = preq.sector_number; } preq.sector_number += seg[i].nsec; } /* This will be hit if the operation was a flush or discard. */ if (!bio) { BUG_ON(operation != WRITE_FLUSH); bio = bio_alloc(GFP_KERNEL, 0); if (unlikely(bio == NULL)) goto fail_put_bio; biolist[nbio++] = bio; bio->bi_bdev = preq.bdev; bio->bi_private = pending_req; bio->bi_end_io = end_block_io_op; } atomic_set(&pending_req->pendcnt, nbio); blk_start_plug(&plug); for (i = 0; i < nbio; i++) submit_bio(operation, biolist[i]); /* Let the I/Os go.. */ blk_finish_plug(&plug); if (operation == READ) blkif->st_rd_sect += preq.nr_sects; else if (operation & WRITE) blkif->st_wr_sect += preq.nr_sects; return 0; fail_flush: xen_blkbk_unmap(blkif, pending_req->segments, pending_req->nr_pages); fail_response: /* Haven't submitted any bio's yet. 
*/
	/* Fail the whole request back to the frontend; nothing was submitted. */
	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(blkif, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	/* Some bios were allocated but not submitted: drop them, then complete
	 * the pending request once with an error so the refcounting unwinds. */
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 * Takes blk_ring_lock with interrupts saved, so it is safe from any context.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id = id;
	resp.operation = op;
	resp.status = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	/* Push the response and learn whether the frontend needs an event. */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

/* Module init: only registers the interface and xenbus hooks when running on Xen. */
static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5649_0
crossvul-cpp_data_bad_2891_11
/* Request key authorisation token key definition. * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * See Documentation/security/keys/request-key.rst */ #include <linux/module.h> #include <linux/sched.h> #include <linux/err.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "internal.h" #include <keys/user-type.h> static int request_key_auth_preparse(struct key_preparsed_payload *); static void request_key_auth_free_preparse(struct key_preparsed_payload *); static int request_key_auth_instantiate(struct key *, struct key_preparsed_payload *); static void request_key_auth_describe(const struct key *, struct seq_file *); static void request_key_auth_revoke(struct key *); static void request_key_auth_destroy(struct key *); static long request_key_auth_read(const struct key *, char __user *, size_t); /* * The request-key authorisation key type definition. */ struct key_type key_type_request_key_auth = { .name = ".request_key_auth", .def_datalen = sizeof(struct request_key_auth), .preparse = request_key_auth_preparse, .free_preparse = request_key_auth_free_preparse, .instantiate = request_key_auth_instantiate, .describe = request_key_auth_describe, .revoke = request_key_auth_revoke, .destroy = request_key_auth_destroy, .read = request_key_auth_read, }; static int request_key_auth_preparse(struct key_preparsed_payload *prep) { return 0; } static void request_key_auth_free_preparse(struct key_preparsed_payload *prep) { } /* * Instantiate a request-key authorisation key. 
*/ static int request_key_auth_instantiate(struct key *key, struct key_preparsed_payload *prep) { key->payload.data[0] = (struct request_key_auth *)prep->data; return 0; } /* * Describe an authorisation token. */ static void request_key_auth_describe(const struct key *key, struct seq_file *m) { struct request_key_auth *rka = key->payload.data[0]; seq_puts(m, "key:"); seq_puts(m, key->description); if (key_is_instantiated(key)) seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); } /* * Read the callout_info data (retrieves the callout information). * - the key's semaphore is read-locked */ static long request_key_auth_read(const struct key *key, char __user *buffer, size_t buflen) { struct request_key_auth *rka = key->payload.data[0]; size_t datalen; long ret; datalen = rka->callout_len; ret = datalen; /* we can return the data as is */ if (buffer && buflen > 0) { if (buflen > datalen) buflen = datalen; if (copy_to_user(buffer, rka->callout_info, buflen) != 0) ret = -EFAULT; } return ret; } /* * Handle revocation of an authorisation token key. * * Called with the key sem write-locked. */ static void request_key_auth_revoke(struct key *key) { struct request_key_auth *rka = key->payload.data[0]; kenter("{%d}", key->serial); if (rka->cred) { put_cred(rka->cred); rka->cred = NULL; } } static void free_request_key_auth(struct request_key_auth *rka) { if (!rka) return; key_put(rka->target_key); key_put(rka->dest_keyring); if (rka->cred) put_cred(rka->cred); kfree(rka->callout_info); kfree(rka); } /* * Destroy an instantiation authorisation token key. */ static void request_key_auth_destroy(struct key *key) { struct request_key_auth *rka = key->payload.data[0]; kenter("{%d}", key->serial); free_request_key_auth(rka); } /* * Create an authorisation token for /sbin/request-key or whoever to gain * access to the caller's security data. 
*/
/*
 * Build and instantiate the ".request_key_auth" key that lets the upcalled
 * helper (e.g. /sbin/request-key) act with the requester's credentials.
 * Returns the instantiated auth key, or an ERR_PTR.  On any failure the
 * partially built record is released via free_request_key_auth(), which
 * tolerates half-filled records.
 */
struct key *request_key_auth_new(struct key *target, const void *callout_info,
				 size_t callout_len, struct key *dest_keyring)
{
	struct request_key_auth *rka, *irka;
	const struct cred *cred = current->cred;
	struct key *authkey = NULL;
	char desc[20];
	int ret = -ENOMEM;

	kenter("%d,", target->serial);

	/* allocate a auth record */
	rka = kzalloc(sizeof(*rka), GFP_KERNEL);
	if (!rka)
		goto error;
	rka->callout_info = kmemdup(callout_info, callout_len, GFP_KERNEL);
	if (!rka->callout_info)
		goto error_free_rka;
	rka->callout_len = callout_len;

	/* see if the calling process is already servicing the key request of
	 * another process */
	if (cred->request_key_auth) {
		/* it is - use that instantiation context here too */
		down_read(&cred->request_key_auth->sem);

		/* if the auth key has been revoked, then the key we're
		 * servicing is already instantiated */
		if (test_bit(KEY_FLAG_REVOKED,
			     &cred->request_key_auth->flags)) {
			up_read(&cred->request_key_auth->sem);
			ret = -EKEYREVOKED;
			goto error_free_rka;
		}

		/* inherit the originating requester's creds and pid under the
		 * read-locked sem so they cannot be revoked underneath us */
		irka = cred->request_key_auth->payload.data[0];
		rka->cred = get_cred(irka->cred);
		rka->pid = irka->pid;

		up_read(&cred->request_key_auth->sem);
	} else {
		/* it isn't - use this process as the context */
		rka->cred = get_cred(cred);
		rka->pid = current->pid;
	}

	rka->target_key = key_get(target);
	rka->dest_keyring = key_get(dest_keyring);

	/* allocate the auth key */
	/* description is the target key's serial in hex */
	sprintf(desc, "%x", target->serial);

	authkey = key_alloc(&key_type_request_key_auth, desc,
			    cred->fsuid, cred->fsgid, cred,
			    KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
			    KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA, NULL);
	if (IS_ERR(authkey)) {
		ret = PTR_ERR(authkey);
		goto error_free_rka;
	}

	/* construct the auth key */
	ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
	if (ret < 0)
		goto error_put_authkey;

	kleave(" = {%d,%d}", authkey->serial, refcount_read(&authkey->usage));
	return authkey;

error_put_authkey:
	key_put(authkey);
error_free_rka:
	free_request_key_auth(rka);
error:
	kleave("= %d", ret);
	return ERR_PTR(ret);
}

/*
 *
Search the current process's keyrings for the authorisation key for
 * instantiation of a key.
 */
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
	/* auth keys are described by the target serial in hex (see
	 * request_key_auth_new()), so search by that description */
	char description[16];
	struct keyring_search_context ctx = {
		.index_key.type		= &key_type_request_key_auth,
		.index_key.description	= description,
		.cred			= current_cred(),
		.match_data.cmp		= key_default_cmp,
		.match_data.raw_data	= description,
		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
		.flags			= KEYRING_SEARCH_DO_STATE_CHECK,
	};
	struct key *authkey;
	key_ref_t authkey_ref;

	sprintf(description, "%x", target_id);

	authkey_ref = search_process_keyrings(&ctx);

	if (IS_ERR(authkey_ref)) {
		authkey = ERR_CAST(authkey_ref);
		/* map "search deferred" to a plain not-found for callers */
		if (authkey == ERR_PTR(-EAGAIN))
			authkey = ERR_PTR(-ENOKEY);
		goto error;
	}

	authkey = key_ref_to_ptr(authkey_ref);
	/* a revoked auth key means the request was already serviced */
	if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
		key_put(authkey);
		authkey = ERR_PTR(-EKEYREVOKED);
	}

error:
	return authkey;
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2891_11
crossvul-cpp_data_good_3244_1
/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2017 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Wez Furlong <wez@thebrainroom.com> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include "php.h" #include "ext/standard/file.h" #include "streams/php_streams_int.h" #include "php_network.h" #if defined(PHP_WIN32) || defined(__riscos__) || defined(NETWARE) # undef AF_UNIX #endif #if defined(AF_UNIX) #include <sys/un.h> #endif #ifndef MSG_DONTWAIT # define MSG_DONTWAIT 0 #endif #ifndef MSG_PEEK # define MSG_PEEK 0 #endif #ifdef PHP_WIN32 /* send/recv family on windows expects int */ # define XP_SOCK_BUF_SIZE(sz) (((sz) > INT_MAX) ? 
INT_MAX : (int)(sz)) #else # define XP_SOCK_BUF_SIZE(sz) (sz) #endif php_stream_ops php_stream_generic_socket_ops; PHPAPI php_stream_ops php_stream_socket_ops; php_stream_ops php_stream_udp_socket_ops; #ifdef AF_UNIX php_stream_ops php_stream_unix_socket_ops; php_stream_ops php_stream_unixdg_socket_ops; #endif static int php_tcp_sockop_set_option(php_stream *stream, int option, int value, void *ptrparam); /* {{{ Generic socket stream operations */ static size_t php_sockop_write(php_stream *stream, const char *buf, size_t count) { php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; int didwrite; struct timeval *ptimeout; if (!sock || sock->socket == -1) { return 0; } if (sock->timeout.tv_sec == -1) ptimeout = NULL; else ptimeout = &sock->timeout; retry: didwrite = send(sock->socket, buf, XP_SOCK_BUF_SIZE(count), (sock->is_blocked && ptimeout) ? MSG_DONTWAIT : 0); if (didwrite <= 0) { int err = php_socket_errno(); char *estr; if (sock->is_blocked && (err == EWOULDBLOCK || err == EAGAIN)) { int retval; sock->timeout_event = 0; do { retval = php_pollfd_for(sock->socket, POLLOUT, ptimeout); if (retval == 0) { sock->timeout_event = 1; break; } if (retval > 0) { /* writable now; retry */ goto retry; } err = php_socket_errno(); } while (err == EINTR); } estr = php_socket_strerror(err, NULL, 0); php_error_docref(NULL, E_NOTICE, "send of " ZEND_LONG_FMT " bytes failed with errno=%ld %s", (zend_long)count, err, estr); efree(estr); } if (didwrite > 0) { php_stream_notify_progress_increment(PHP_STREAM_CONTEXT(stream), didwrite, 0); } if (didwrite < 0) { didwrite = 0; } return didwrite; } static void php_sock_stream_wait_for_data(php_stream *stream, php_netstream_data_t *sock) { int retval; struct timeval *ptimeout; if (!sock || sock->socket == -1) { return; } sock->timeout_event = 0; if (sock->timeout.tv_sec == -1) ptimeout = NULL; else ptimeout = &sock->timeout; while(1) { retval = php_pollfd_for(sock->socket, PHP_POLLREADABLE, ptimeout); if (retval == 0) 
sock->timeout_event = 1;

		/* poll succeeded (readable) or failed hard: stop waiting */
		if (retval >= 0)
			break;

		/* only retry the poll when it was interrupted by a signal */
		if (php_socket_errno() != EINTR)
			break;
	}
}

/*
 * Read up to count bytes from the socket into buf.
 * On a blocking stream, first waits (honouring the stream timeout) for data;
 * returns 0 on timeout.  EOF is flagged when recv() reports 0 bytes or a
 * fatal error other than EWOULDBLOCK/EAGAIN.
 */
static size_t php_sockop_read(php_stream *stream, char *buf, size_t count)
{
	php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract;
	ssize_t nr_bytes = 0;
	int err;

	if (!sock || sock->socket == -1) {
		return 0;
	}

	if (sock->is_blocked) {
		php_sock_stream_wait_for_data(stream, sock);
		if (sock->timeout_event)
			return 0;
	}

	/* with a finite timeout the wait above already blocked, so the recv
	 * itself is issued non-blocking (MSG_DONTWAIT) */
	nr_bytes = recv(sock->socket, buf, XP_SOCK_BUF_SIZE(count),
			(sock->is_blocked && sock->timeout.tv_sec != -1) ? MSG_DONTWAIT : 0);
	err = php_socket_errno();

	stream->eof = (nr_bytes == 0 || (nr_bytes == -1 && err != EWOULDBLOCK && err != EAGAIN));

	if (nr_bytes > 0) {
		php_stream_notify_progress_increment(PHP_STREAM_CONTEXT(stream), nr_bytes, 0);
	}

	if (nr_bytes < 0) {
		nr_bytes = 0;
	}

	return nr_bytes;
}

/*
 * Close the socket stream, optionally closing the underlying descriptor,
 * and free the abstract data.
 */
static int php_sockop_close(php_stream *stream, int close_handle)
{
	php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract;
#ifdef PHP_WIN32
	int n;
#endif

	if (!sock) {
		return 0;
	}

	if (close_handle) {

#ifdef PHP_WIN32
		if (sock->socket == -1)
			sock->socket = SOCK_ERR;
#endif
		if (sock->socket != SOCK_ERR) {
#ifdef PHP_WIN32
			/* prevent more data from coming in */
			shutdown(sock->socket, SHUT_RD);

			/* try to make sure that the OS sends all data before we close the connection.
			 * Essentially, we are waiting for the socket to become writeable, which means
			 * that all pending data has been sent.
			 * We use a small timeout which should encourage the OS to send the data,
			 * but at the same time avoid hanging indefinitely.
* */ do { n = php_pollfd_for_ms(sock->socket, POLLOUT, 500); } while (n == -1 && php_socket_errno() == EINTR); #endif closesocket(sock->socket); sock->socket = SOCK_ERR; } } pefree(sock, php_stream_is_persistent(stream)); return 0; } static int php_sockop_flush(php_stream *stream) { #if 0 php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; return fsync(sock->socket); #endif return 0; } static int php_sockop_stat(php_stream *stream, php_stream_statbuf *ssb) { #if ZEND_WIN32 return 0; #else php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; return zend_fstat(sock->socket, &ssb->sb); #endif } static inline int sock_sendto(php_netstream_data_t *sock, const char *buf, size_t buflen, int flags, struct sockaddr *addr, socklen_t addrlen ) { int ret; if (addr) { ret = sendto(sock->socket, buf, XP_SOCK_BUF_SIZE(buflen), flags, addr, XP_SOCK_BUF_SIZE(addrlen)); return (ret == SOCK_CONN_ERR) ? -1 : ret; } #ifdef PHP_WIN32 return ((ret = send(sock->socket, buf, buflen > INT_MAX ? INT_MAX : (int)buflen, flags)) == SOCK_CONN_ERR) ? -1 : ret; #else return ((ret = send(sock->socket, buf, buflen, flags)) == SOCK_CONN_ERR) ? -1 : ret; #endif } static inline int sock_recvfrom(php_netstream_data_t *sock, char *buf, size_t buflen, int flags, zend_string **textaddr, struct sockaddr **addr, socklen_t *addrlen ) { int ret; int want_addr = textaddr || addr; if (want_addr) { php_sockaddr_storage sa; socklen_t sl = sizeof(sa); ret = recvfrom(sock->socket, buf, XP_SOCK_BUF_SIZE(buflen), flags, (struct sockaddr*)&sa, &sl); ret = (ret == SOCK_CONN_ERR) ? -1 : ret; if (sl) { php_network_populate_name_from_sockaddr((struct sockaddr*)&sa, sl, textaddr, addr, addrlen); } else { if (textaddr) { *textaddr = ZSTR_EMPTY_ALLOC(); } if (addr) { *addr = NULL; *addrlen = 0; } } } else { ret = recv(sock->socket, buf, XP_SOCK_BUF_SIZE(buflen), flags); ret = (ret == SOCK_CONN_ERR) ? 
-1 : ret;
	}

	return ret;
}

/* Stream option handler shared by all socket stream types.
 * Handles liveness checks, blocking mode, timeouts, metadata and the
 * transport (xport) sub-API for an already-created socket.
 * Returns one of the PHP_STREAM_OPTION_RETURN_* codes. */
static int php_sockop_set_option(php_stream *stream, int option, int value, void *ptrparam)
{
	int oldmode, flags;
	php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract;
	php_stream_xport_param *xparam;

	if (!sock) {
		return PHP_STREAM_OPTION_RETURN_NOTIMPL;
	}

	switch(option) {
		case PHP_STREAM_OPTION_CHECK_LIVENESS:
			{
				struct timeval tv;
				char buf;
				int alive = 1;

				/* value == -1 means "use the stream's own timeout";
				 * a stream timeout of -1 in turn falls back to the INI default. */
				if (value == -1) {
					if (sock->timeout.tv_sec == -1) {
						tv.tv_sec = FG(default_socket_timeout);
						tv.tv_usec = 0;
					} else {
						tv = sock->timeout;
					}
				} else {
					tv.tv_sec = value;
					tv.tv_usec = 0;
				}

				if (sock->socket == -1) {
					alive = 0;
				} else if (php_pollfd_for(sock->socket, PHP_POLLREADABLE|POLLPRI, &tv) > 0) {
#ifdef PHP_WIN32
					int ret;
#else
					ssize_t ret;
#endif
					int err;

					/* MSG_PEEK: probe without consuming data */
					ret = recv(sock->socket, &buf, sizeof(buf), MSG_PEEK);
					err = php_socket_errno();
					if (0 == ret || /* the counterpart did properly shutdown*/
						(0 > ret && err != EWOULDBLOCK && err != EAGAIN && err != EMSGSIZE)) {
						/* there was an unrecoverable error */
						alive = 0;
					}
				}
				return alive ? PHP_STREAM_OPTION_RETURN_OK : PHP_STREAM_OPTION_RETURN_ERR;
			}

		case PHP_STREAM_OPTION_BLOCKING:
			/* returns the PREVIOUS blocking mode on success */
			oldmode = sock->is_blocked;
			if (SUCCESS == php_set_sock_blocking(sock->socket, value)) {
				sock->is_blocked = value;
				return oldmode;
			}
			return PHP_STREAM_OPTION_RETURN_ERR;

		case PHP_STREAM_OPTION_READ_TIMEOUT:
			sock->timeout = *(struct timeval*)ptrparam;
			sock->timeout_event = 0;
			return PHP_STREAM_OPTION_RETURN_OK;

		case PHP_STREAM_OPTION_META_DATA_API:
			/* ptrparam is a zval array to be filled for stream_get_meta_data() */
			add_assoc_bool((zval *)ptrparam, "timed_out", sock->timeout_event);
			add_assoc_bool((zval *)ptrparam, "blocked", sock->is_blocked);
			add_assoc_bool((zval *)ptrparam, "eof", stream->eof);
			return PHP_STREAM_OPTION_RETURN_OK;

		case PHP_STREAM_OPTION_XPORT_API:
			xparam = (php_stream_xport_param *)ptrparam;

			switch (xparam->op) {
				case STREAM_XPORT_OP_LISTEN:
					xparam->outputs.returncode = (listen(sock->socket, xparam->inputs.backlog) == 0) ? 0 : -1;
					return PHP_STREAM_OPTION_RETURN_OK;

				case STREAM_XPORT_OP_GET_NAME:
					xparam->outputs.returncode = php_network_get_sock_name(sock->socket,
							xparam->want_textaddr ? &xparam->outputs.textaddr : NULL,
							xparam->want_addr ? &xparam->outputs.addr : NULL,
							xparam->want_addr ? &xparam->outputs.addrlen : NULL
							);
					return PHP_STREAM_OPTION_RETURN_OK;

				case STREAM_XPORT_OP_GET_PEER_NAME:
					xparam->outputs.returncode = php_network_get_peer_name(sock->socket,
							xparam->want_textaddr ? &xparam->outputs.textaddr : NULL,
							xparam->want_addr ? &xparam->outputs.addr : NULL,
							xparam->want_addr ? &xparam->outputs.addrlen : NULL
							);
					return PHP_STREAM_OPTION_RETURN_OK;

				case STREAM_XPORT_OP_SEND:
					flags = 0;
					if ((xparam->inputs.flags & STREAM_OOB) == STREAM_OOB) {
						flags |= MSG_OOB;
					}
					xparam->outputs.returncode = sock_sendto(sock,
							xparam->inputs.buf, xparam->inputs.buflen,
							flags,
							xparam->inputs.addr,
							xparam->inputs.addrlen);
					if (xparam->outputs.returncode == -1) {
						char *err = php_socket_strerror(php_socket_errno(), NULL, 0);
						php_error_docref(NULL, E_WARNING,
							"%s\n", err);
						efree(err);
					}
					return PHP_STREAM_OPTION_RETURN_OK;

				case STREAM_XPORT_OP_RECV:
					flags = 0;
					if ((xparam->inputs.flags & STREAM_OOB) == STREAM_OOB) {
						flags |= MSG_OOB;
					}
					if ((xparam->inputs.flags & STREAM_PEEK) == STREAM_PEEK) {
						flags |= MSG_PEEK;
					}
					xparam->outputs.returncode = sock_recvfrom(sock,
							xparam->inputs.buf, xparam->inputs.buflen,
							flags,
							xparam->want_textaddr ? &xparam->outputs.textaddr : NULL,
							xparam->want_addr ? &xparam->outputs.addr : NULL,
							xparam->want_addr ? &xparam->outputs.addrlen : NULL
							);
					return PHP_STREAM_OPTION_RETURN_OK;

#ifdef HAVE_SHUTDOWN
# ifndef SHUT_RD
# define SHUT_RD 0
# endif
# ifndef SHUT_WR
# define SHUT_WR 1
# endif
# ifndef SHUT_RDWR
# define SHUT_RDWR 2
# endif
				case STREAM_XPORT_OP_SHUTDOWN: {
					/* maps the xparam->how enum (0/1/2) onto the platform constants */
					static const int shutdown_how[] = {SHUT_RD, SHUT_WR, SHUT_RDWR};

					xparam->outputs.returncode = shutdown(sock->socket, shutdown_how[xparam->how]);
					return PHP_STREAM_OPTION_RETURN_OK;
				}
#endif

				default:
					return PHP_STREAM_OPTION_RETURN_NOTIMPL;
			}

		default:
			return PHP_STREAM_OPTION_RETURN_NOTIMPL;
	}
}

/* Expose the underlying socket as another resource kind:
 * a stdio FILE* (via fdopen) or the raw descriptor for select()/socket use. */
static int php_sockop_cast(php_stream *stream, int castas, void **ret)
{
	php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract;

	if (!sock) {
		return FAILURE;
	}

	switch(castas) {
		case PHP_STREAM_AS_STDIO:
			if (ret) {
				/* NOTE(review): fdopen takes ownership of the same fd as the
				 * stream — no dup() here; verify callers account for that. */
				*(FILE**)ret = fdopen(sock->socket, stream->mode);
				if (*ret)
					return SUCCESS;
				return FAILURE;
			}
			return SUCCESS;

		case PHP_STREAM_AS_FD_FOR_SELECT:
		case PHP_STREAM_AS_FD:
		case PHP_STREAM_AS_SOCKETD:
			if (ret)
				*(php_socket_t *)ret = sock->socket;
			return SUCCESS;
		default:
			return FAILURE;
	}
}
/* }}} */

/* These may look identical, but we need them this way so that
 * we can determine which type of socket we are dealing with
 * by inspecting stream->ops.
 * A "useful" side-effect is that the user's scripts can then
 * make similar decisions using stream_get_meta_data.
 * */
php_stream_ops php_stream_generic_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"generic_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_sockop_set_option,
};

php_stream_ops php_stream_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"tcp_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_tcp_sockop_set_option,
};

php_stream_ops php_stream_udp_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"udp_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_tcp_sockop_set_option,
};

#ifdef AF_UNIX
php_stream_ops php_stream_unix_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"unix_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_tcp_sockop_set_option,
};

php_stream_ops php_stream_unixdg_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"udg_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_tcp_sockop_set_option,
};
#endif

/* network socket operations */

#ifdef AF_UNIX
/* Fill a sockaddr_un from xparam->inputs.name, truncating (with an
 * E_NOTICE) if the path does not fit in sun_path. Always returns 1. */
static inline int parse_unix_address(php_stream_xport_param *xparam, struct sockaddr_un *unix_addr)
{
	memset(unix_addr, 0, sizeof(*unix_addr));
	unix_addr->sun_family = AF_UNIX;

	/* we need to be binary safe on systems that support an abstract
	 * namespace */
	if (xparam->inputs.namelen >= sizeof(unix_addr->sun_path)) {
		/* On linux, when the path begins with a NUL byte we are
		 * referring to an abstract namespace. In theory we should
		 * allow an extra byte below, since we don't need the NULL.
		 * BUT, to get into this branch of code, the name is too long,
		 * so we don't care. */
		xparam->inputs.namelen = sizeof(unix_addr->sun_path) - 1;
		php_error_docref(NULL, E_NOTICE,
			"socket path exceeded the maximum allowed length of %lu bytes "
			"and was truncated", (unsigned long)sizeof(unix_addr->sun_path));
	}

	memcpy(unix_addr->sun_path, xparam->inputs.name, xparam->inputs.namelen);

	return 1;
}
#endif

/* Split "host:port" (or "[ipv6]:port") into an estrndup'ed host string and
 * *portno. Returns NULL on parse failure, optionally filling *err with an
 * error message when get_err is set. */
static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err)
{
	char *colon;
	char *host = NULL; /* NOTE(review): 'host' is never used in this function */

#ifdef HAVE_IPV6
	if (*(str) == '[' && str_len > 1) {
		/* IPV6 notation to specify raw address with port (i.e. [fe80::1]:80) */
		char *p = memchr(str + 1, ']', str_len - 2), *e = NULL;
		if (!p || *(p + 1) != ':') {
			if (get_err) {
				*err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str);
			}
			return NULL;
		}
		*portno = strtol(p + 2, &e, 10);
		if (e && *e) {
			/* trailing garbage after the port number */
			if (get_err) {
				*err = strpprintf(0, "Failed to parse address \"%s\"", str);
			}
			return NULL;
		}
		return estrndup(str + 1, p - str - 1);
	}
#endif
	/* search only str_len - 1 chars: a colon in last position would
	 * leave an empty port, which is treated as a parse error */
	if (str_len) {
		colon = memchr(str, ':', str_len - 1);
	} else {
		colon = NULL;
	}
	if (colon) {
		char *e = NULL;
		*portno = strtol(colon + 1, &e, 10);
		if (!e || !*e) {
			return estrndup(str, colon - str);
		}
	}

	if (get_err) {
		*err = strpprintf(0, "Failed to parse address \"%s\"", str);
	}
	return NULL;
}

/* Convenience wrapper over parse_ip_address_ex using xparam fields. */
static inline char *parse_ip_address(php_stream_xport_param *xparam, int *portno)
{
	return parse_ip_address_ex(xparam->inputs.name, xparam->inputs.namelen, portno, xparam->want_errortext, &xparam->outputs.error_text);
}

/* Create and bind the stream's socket (unix/udg or tcp/udp), honoring the
 * ipv6_v6only, so_reuseport and so_broadcast context options.
 * Returns 0 on success, -1 on failure. */
static inline int php_tcp_sockop_bind(php_stream *stream, php_netstream_data_t *sock,
		php_stream_xport_param *xparam)
{
	char *host = NULL;
	int portno, err;
	long sockopts = STREAM_SOCKOP_NONE;
	zval *tmpzval = NULL;

#ifdef AF_UNIX
	if (stream->ops == &php_stream_unix_socket_ops || stream->ops == &php_stream_unixdg_socket_ops) {
		struct sockaddr_un unix_addr;

		sock->socket = socket(PF_UNIX, stream->ops == &php_stream_unix_socket_ops ? SOCK_STREAM : SOCK_DGRAM, 0);

		if (sock->socket == SOCK_ERR) {
			if (xparam->want_errortext) {
				xparam->outputs.error_text = strpprintf(0, "Failed to create unix%s socket %s",
						stream->ops == &php_stream_unix_socket_ops ? "" : "datagram",
						strerror(errno));
			}
			return -1;
		}

		parse_unix_address(xparam, &unix_addr);

		/* pass the exact used length (offset of sun_path + name length),
		 * not sizeof(unix_addr) — keeps abstract-namespace names intact */
		return bind(sock->socket, (const struct sockaddr *)&unix_addr,
			(socklen_t) XtOffsetOf(struct sockaddr_un, sun_path) + xparam->inputs.namelen);
	}
#endif

	host = parse_ip_address(xparam, &portno);

	if (host == NULL) {
		return -1;
	}

#ifdef IPV6_V6ONLY
	if (PHP_STREAM_CONTEXT(stream)
		&& (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "ipv6_v6only")) != NULL
		&& Z_TYPE_P(tmpzval) != IS_NULL
	) {
		sockopts |= STREAM_SOCKOP_IPV6_V6ONLY;
		/* multiply-by-boolean encodes the ENABLED bit only when truthy */
		sockopts |= STREAM_SOCKOP_IPV6_V6ONLY_ENABLED * zend_is_true(tmpzval);
	}
#endif

#ifdef SO_REUSEPORT
	if (PHP_STREAM_CONTEXT(stream)
		&& (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "so_reuseport")) != NULL
		&& zend_is_true(tmpzval)
	) {
		sockopts |= STREAM_SOCKOP_SO_REUSEPORT;
	}
#endif

#ifdef SO_BROADCAST
	if (stream->ops == &php_stream_udp_socket_ops /* SO_BROADCAST is only applicable for UDP */
		&& PHP_STREAM_CONTEXT(stream)
		&& (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "so_broadcast")) != NULL
		&& zend_is_true(tmpzval)
	) {
		sockopts |= STREAM_SOCKOP_SO_BROADCAST;
	}
#endif

	sock->socket = php_network_bind_socket_to_local_addr(host, portno,
			stream->ops == &php_stream_udp_socket_ops ? SOCK_DGRAM : SOCK_STREAM,
			sockopts,
			xparam->want_errortext ? &xparam->outputs.error_text : NULL,
			&err
			);

	if (host) {
		efree(host);
	}

	return sock->socket == -1 ? -1 : 0;
}

/* Create the socket and connect it (unix/udg or tcp/udp), honoring the
 * "bindto" and "so_broadcast" context options. Returns 0 on success,
 * -1 on failure, and 1 for a pending async connect (EINPROGRESS). */
static inline int php_tcp_sockop_connect(php_stream *stream, php_netstream_data_t *sock,
		php_stream_xport_param *xparam)
{
	char *host = NULL, *bindto = NULL;
	int portno, bindport = 0;
	int err = 0;
	int ret;
	zval *tmpzval = NULL;
	long sockopts = STREAM_SOCKOP_NONE;

#ifdef AF_UNIX
	if (stream->ops == &php_stream_unix_socket_ops || stream->ops == &php_stream_unixdg_socket_ops) {
		struct sockaddr_un unix_addr;

		sock->socket = socket(PF_UNIX, stream->ops == &php_stream_unix_socket_ops ? SOCK_STREAM : SOCK_DGRAM, 0);

		if (sock->socket == SOCK_ERR) {
			if (xparam->want_errortext) {
				xparam->outputs.error_text = strpprintf(0, "Failed to create unix socket");
			}
			return -1;
		}

		parse_unix_address(xparam, &unix_addr);

		ret = php_network_connect_socket(sock->socket,
				(const struct sockaddr *)&unix_addr, (socklen_t) XtOffsetOf(struct sockaddr_un, sun_path) + xparam->inputs.namelen,
				xparam->op == STREAM_XPORT_OP_CONNECT_ASYNC, xparam->inputs.timeout,
				xparam->want_errortext ? &xparam->outputs.error_text : NULL,
				&err);

		xparam->outputs.error_code = err;

		goto out;
	}
#endif

	host = parse_ip_address(xparam, &portno);

	if (host == NULL) {
		return -1;
	}

	if (PHP_STREAM_CONTEXT(stream) && (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "bindto")) != NULL) {
		if (Z_TYPE_P(tmpzval) != IS_STRING) {
			if (xparam->want_errortext) {
				xparam->outputs.error_text = strpprintf(0, "local_addr context option is not a string.");
			}
			efree(host);
			return -1;
		}
		bindto = parse_ip_address_ex(Z_STRVAL_P(tmpzval), Z_STRLEN_P(tmpzval), &bindport, xparam->want_errortext, &xparam->outputs.error_text);
	}

#ifdef SO_BROADCAST
	if (stream->ops == &php_stream_udp_socket_ops /* SO_BROADCAST is only applicable for UDP */
		&& PHP_STREAM_CONTEXT(stream)
		&& (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "so_broadcast")) != NULL
		&& zend_is_true(tmpzval)
	) {
		sockopts |= STREAM_SOCKOP_SO_BROADCAST;
	}
#endif

	/* Note: the test here for php_stream_udp_socket_ops is important, because we
	 * want the default to be TCP sockets so that the openssl extension can
	 * re-use this code. */

	sock->socket = php_network_connect_socket_to_host(host, portno,
			stream->ops == &php_stream_udp_socket_ops ? SOCK_DGRAM : SOCK_STREAM,
			xparam->op == STREAM_XPORT_OP_CONNECT_ASYNC,
			xparam->inputs.timeout,
			xparam->want_errortext ? &xparam->outputs.error_text : NULL,
			&err,
			bindto,
			bindport,
			sockopts
			);

	ret = sock->socket == -1 ? -1 : 0;
	xparam->outputs.error_code = err;

	if (host) {
		efree(host);
	}
	if (bindto) {
		efree(bindto);
	}

#ifdef AF_UNIX
out:
#endif

	if (ret >= 0 && xparam->op == STREAM_XPORT_OP_CONNECT_ASYNC && err == EINPROGRESS) {
		/* indicates pending connection */
		return 1;
	}

	return ret;
}

/* Accept an incoming connection and wrap it in a new stream that shares
 * the listening stream's ops and context. Returns 0 on success, -1 on
 * failure; the new stream is placed in xparam->outputs.client. */
static inline int php_tcp_sockop_accept(php_stream *stream, php_netstream_data_t *sock,
		php_stream_xport_param *xparam STREAMS_DC)
{
	int clisock;

	xparam->outputs.client = NULL;

	clisock = php_network_accept_incoming(sock->socket,
			xparam->want_textaddr ? &xparam->outputs.textaddr : NULL,
			xparam->want_addr ? &xparam->outputs.addr : NULL,
			xparam->want_addr ? &xparam->outputs.addrlen : NULL,
			xparam->inputs.timeout,
			xparam->want_errortext ? &xparam->outputs.error_text : NULL,
			&xparam->outputs.error_code
			);

	if (clisock >= 0) {
		php_netstream_data_t *clisockdata;

		clisockdata = emalloc(sizeof(*clisockdata));

		/* NOTE(review): emalloc bails out of the request on OOM rather than
		 * returning NULL, so this branch is defensive only */
		if (clisockdata == NULL) {
			close(clisock);
			/* technically a fatal error */
		} else {
			/* clone the listener's settings (timeouts, blocking mode, ...) */
			memcpy(clisockdata, sock, sizeof(*clisockdata));
			clisockdata->socket = clisock;

			xparam->outputs.client = php_stream_alloc_rel(stream->ops, clisockdata, NULL, "r+");
			if (xparam->outputs.client) {
				xparam->outputs.client->ctx = stream->ctx;
				if (stream->ctx) {
					GC_REFCOUNT(stream->ctx)++;
				}
			}
		}
	}

	return xparam->outputs.client == NULL ? -1 : 0;
}

/* Option handler for tcp/udp/unix streams: dispatches connect/bind/accept
 * itself and forwards everything else to the generic handler above. */
static int php_tcp_sockop_set_option(php_stream *stream, int option, int value, void *ptrparam)
{
	php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract;
	php_stream_xport_param *xparam;

	switch(option) {
		case PHP_STREAM_OPTION_XPORT_API:
			xparam = (php_stream_xport_param *)ptrparam;

			switch(xparam->op) {
				case STREAM_XPORT_OP_CONNECT:
				case STREAM_XPORT_OP_CONNECT_ASYNC:
					xparam->outputs.returncode = php_tcp_sockop_connect(stream, sock, xparam);
					return PHP_STREAM_OPTION_RETURN_OK;

				case STREAM_XPORT_OP_BIND:
					xparam->outputs.returncode = php_tcp_sockop_bind(stream, sock, xparam);
					return PHP_STREAM_OPTION_RETURN_OK;

				case STREAM_XPORT_OP_ACCEPT:
					xparam->outputs.returncode = php_tcp_sockop_accept(stream, sock, xparam STREAMS_CC);
					return PHP_STREAM_OPTION_RETURN_OK;
				default:
					/* fall through */
					;
			}
	}
	return php_sockop_set_option(stream, option, value, ptrparam);
}

/* Factory for socket streams: selects the ops table from the protocol
 * name, allocates the per-stream data (socket deferred to bind/connect)
 * and wraps it in a php_stream. Returns NULL on failure. */
PHPAPI php_stream *php_stream_generic_socket_factory(const char *proto, size_t protolen,
		const char *resourcename, size_t resourcenamelen,
		const char *persistent_id, int options, int flags,
		struct timeval *timeout,
		php_stream_context *context STREAMS_DC)
{
	php_stream *stream = NULL;
	php_netstream_data_t *sock;
	php_stream_ops *ops;

	/* which type of socket ? */
	if (strncmp(proto, "tcp", protolen) == 0) {
		ops = &php_stream_socket_ops;
	} else if (strncmp(proto, "udp", protolen) == 0) {
		ops = &php_stream_udp_socket_ops;
	}
#ifdef AF_UNIX
	else if (strncmp(proto, "unix", protolen) == 0) {
		ops = &php_stream_unix_socket_ops;
	} else if (strncmp(proto, "udg", protolen) == 0) {
		ops = &php_stream_unixdg_socket_ops;
	}
#endif
	else {
		/* should never happen */
		return NULL;
	}

	sock = pemalloc(sizeof(php_netstream_data_t), persistent_id ? 1 : 0);
	memset(sock, 0, sizeof(php_netstream_data_t));

	sock->is_blocked = 1;
	sock->timeout.tv_sec = FG(default_socket_timeout);
	sock->timeout.tv_usec = 0;

	/* we don't know the socket until we have determined if we are binding or
	 * connecting */
	sock->socket = -1;

	stream = php_stream_alloc_rel(ops, sock, persistent_id, "r+");

	if (stream == NULL) {
		pefree(sock, persistent_id ? 1 : 0);
		return NULL;
	}

	/* NOTE(review): both branches below return the same value; the flags
	 * check appears to be vestigial */
	if (flags == 0) {
		return stream;
	}

	return stream;
}

/*
 * Local variables:
 * tab-width: 4
 * c-basic-offset: 4
 * End:
 * vim600: noet sw=4 ts=4 fdm=marker
 * vim<600: noet sw=4 ts=4
 */
./CrossVul/dataset_final_sorted/CWE-20/c/good_3244_1
crossvul-cpp_data_good_763_0
/* Copyright 2016 Christian Hoene, Symonics GmbH */ #include <stdlib.h> #include <string.h> #include <errno.h> #include "reader.h" /* * 00000370 42 54 4c 46 00 08 00 5b 01 00 00 00 2d 00 00 07 |BTLF...[....-...| 00000380 00 00 00 f8 ea 72 15 00 c9 03 00 00 00 26 00 00 |.....r.......&..| 00000390 14 00 00 00 32 32 7c 17 00 22 02 00 00 00 32 00 |....22|.."....2.| 000003a0 00 0b 00 00 00 07 ef 9c 26 00 bb 01 00 00 00 46 |........&......F| 000003b0 00 00 09 00 00 00 e5 f6 ba 26 00 45 03 00 00 00 |.........&.E....| 000003c0 34 00 00 11 00 00 00 f6 71 f0 2e 00 a3 02 00 00 |4.......q.......| 000003d0 00 3e 00 00 0d 00 00 00 61 36 dc 36 00 79 03 00 |.>......a6.6.y..| 000003e0 00 00 35 00 00 12 00 00 00 97 1b 4e 45 00 88 01 |..5........NE...| 000003f0 00 00 00 33 00 00 08 00 00 00 56 d7 d0 47 00 ae |...3......V..G..| 00000400 03 00 00 00 1b 00 00 13 00 00 00 2f 03 50 5a 00 |.........../.PZ.| 00000410 22 01 00 00 00 39 00 00 06 00 00 00 b7 88 37 66 |"....9........7f| 00000420 00 01 03 00 00 00 28 00 00 0f 00 00 00 dc aa 47 |......(........G| 00000430 66 00 16 04 00 00 00 2c 00 00 15 00 00 00 6b 54 |f......,......kT| 00000440 7d 77 00 fd 00 00 00 00 25 00 00 05 00 00 00 7d |}w......%......}| 00000450 0c 8c 9e 00 29 03 00 00 00 1c 00 00 10 00 00 00 |....)...........| 00000460 4c f3 0e a0 00 16 00 00 00 00 25 00 00 00 00 00 |L.........%.....| 00000470 00 e7 30 2d ab 00 01 02 00 00 00 21 00 00 0a 00 |..0-.......!....| 00000480 00 00 35 b5 69 b0 00 e1 02 00 00 00 20 00 00 0e |..5.i....... ...| 00000490 00 00 00 2b c5 8b c4 00 3b 00 00 00 00 20 00 00 |...+....;.... 
..| 000004a0 01 00 00 00 09 a0 74 cc 00 93 00 00 00 00 2f 00 |......t......./.| 000004b0 00 03 00 00 00 3f 48 ef d6 00 5b 00 00 00 00 38 |.....?H...[....8| 000004c0 00 00 02 00 00 00 f1 7e 7d dd 00 54 02 00 00 00 |.......~}..T....| 000004d0 4f 00 00 0c 00 00 00 48 35 ff f5 00 c2 00 00 00 |O......H5.......| 000004e0 00 3b 00 00 04 00 00 00 ad 61 4e ff 63 42 f7 73 |.;.......aN.cB.s| 000004f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| * 00000570 42 54 4c 46 00 09 00 16 00 00 00 00 25 00 00 00 |BTLF........%...| 00000580 00 00 00 00 3b 00 00 00 00 20 00 00 01 00 00 00 |....;.... ......| 00000590 00 5b 00 00 00 00 38 00 00 02 00 00 00 00 93 00 |.[....8.........| 000005a0 00 00 00 2f 00 00 03 00 00 00 00 c2 00 00 00 00 |.../............| 000005b0 3b 00 00 04 00 00 00 00 fd 00 00 00 00 25 00 00 |;............%..| 000005c0 05 00 00 00 00 22 01 00 00 00 39 00 00 06 00 00 |....."....9.....| 000005d0 00 00 5b 01 00 00 00 2d 00 00 07 00 00 00 00 88 |..[....-........| 000005e0 01 00 00 00 33 00 00 08 00 00 00 00 bb 01 00 00 |....3...........| 000005f0 00 46 00 00 09 00 00 00 00 01 02 00 00 00 21 00 |.F............!.| 00000600 00 0a 00 00 00 00 22 02 00 00 00 32 00 00 0b 00 |......"....2....| 00000610 00 00 00 54 02 00 00 00 4f 00 00 0c 00 00 00 00 |...T....O.......| 00000620 a3 02 00 00 00 3e 00 00 0d 00 00 00 00 e1 02 00 |.....>..........| 00000630 00 00 20 00 00 0e 00 00 00 00 01 03 00 00 00 28 |.. 
............(| 00000640 00 00 0f 00 00 00 00 29 03 00 00 00 1c 00 00 10 |.......)........| 00000650 00 00 00 00 45 03 00 00 00 34 00 00 11 00 00 00 |....E....4......| 00000660 00 79 03 00 00 00 35 00 00 12 00 00 00 00 ae 03 |.y....5.........| 00000670 00 00 00 1b 00 00 13 00 00 00 00 c9 03 00 00 00 |................| 00000680 26 00 00 14 00 00 00 00 16 04 00 00 00 2c 00 00 |&............,..| 00000690 15 00 00 00 d3 c7 19 a0 00 00 00 00 00 00 00 00 |................| 000006a0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| */ static int readBTLF(struct READER *reader, struct BTREE *btree, int number_of_records, union RECORD *records) { int i; uint8_t type, message_flags; uint32_t creation_order, hash_of_name; uint64_t heap_id; char buf[4]; UNUSED(heap_id); UNUSED(hash_of_name); UNUSED(creation_order); UNUSED(message_flags); /* read signature */ if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "BTLF", 4)) { log("cannot read signature of BTLF\n"); return MYSOFA_INVALID_FORMAT; } log("%08lX %.4s\n", (uint64_t )ftell(reader->fhd) - 4, buf); if (fgetc(reader->fhd) != 0) { log("object BTLF must have version 0\n"); return MYSOFA_INVALID_FORMAT; } type = (uint8_t)fgetc(reader->fhd); for (i = 0; i < number_of_records; i++) { switch (type) { case 5: records->type5.hash_of_name = (uint32_t)readValue(reader, 4); records->type5.heap_id = readValue(reader, 7); log(" type5 %08X %14lX\n", records->type5.hash_of_name, records->type5.heap_id); records++; break; case 6: /*creation_order = */readValue(reader, 8); /*heap_id = */readValue(reader, 7); break; case 8: /*heap_id = */readValue(reader, 8); /*message_flags = */fgetc(reader->fhd); /*creation_order = */readValue(reader, 4); /*hash_of_name = */readValue(reader, 4); break; case 9: /*heap_id = */readValue(reader, 8); /*message_flags = */fgetc(reader->fhd); /*creation_order = */readValue(reader, 4); break; default: log("object BTLF has unknown type %d\n", type); return MYSOFA_INVALID_FORMAT; } } /* 
fseeko(reader->fhd, bthd->root_node_address + bthd->node_size, SEEK_SET); skip checksum */ return MYSOFA_OK; } /* III.A.2. Disk Format: Level 1A2 - Version 2 B-trees 000002d0 32 1d 42 54 48 44 00 08 00 02 00 00 11 00 00 00 |2.BTHD..........| 000002e0 64 28 70 03 00 00 00 00 00 00 16 00 16 00 00 00 |d(p.............| 000002f0 00 00 00 00 30 12 d9 6e 42 54 48 44 00 09 00 02 |....0..nBTHD....| 00000300 00 00 0d 00 00 00 64 28 70 05 00 00 00 00 00 00 |......d(p.......| 00000310 16 00 16 00 00 00 00 00 00 00 e2 0d 76 5c 46 53 |............v\FS| */ int btreeRead(struct READER *reader, struct BTREE *btree) { char buf[4]; /* read signature */ if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "BTHD", 4)) { log("cannot read signature of BTHD\n"); return MYSOFA_INVALID_FORMAT; } log("%08lX %.4s\n", (uint64_t )ftell(reader->fhd) - 4, buf); if (fgetc(reader->fhd) != 0) { log("object BTHD must have version 0\n"); return MYSOFA_INVALID_FORMAT; } btree->type = (uint8_t)fgetc(reader->fhd); btree->node_size = (uint32_t)readValue(reader, 4); btree->record_size = (uint16_t)readValue(reader, 2); btree->depth = (uint16_t)readValue(reader, 2); btree->split_percent = (uint8_t)fgetc(reader->fhd); btree->merge_percent = (uint8_t)fgetc(reader->fhd); btree->root_node_address = (uint64_t)readValue(reader, reader->superblock.size_of_offsets); btree->number_of_records = (uint16_t)readValue(reader, 2); if(btree->number_of_records>0x1000) return MYSOFA_UNSUPPORTED_FORMAT; btree->total_number = (uint64_t)readValue(reader, reader->superblock.size_of_lengths); /* fseek(reader->fhd, 4, SEEK_CUR); skip checksum */ if(btree->total_number > 0x10000000) return MYSOFA_NO_MEMORY; btree->records = malloc(sizeof(btree->records[0]) * btree->total_number); if (!btree->records) return MYSOFA_NO_MEMORY; memset(btree->records, 0, sizeof(btree->records[0]) * btree->total_number); /* read records */ if(fseek(reader->fhd, btree->root_node_address, SEEK_SET)<0) return errno; return readBTLF(reader, btree, 
btree->number_of_records, btree->records); } void btreeFree(struct BTREE *btree) { free(btree->records); } /* III.A.1. Disk Format: Level 1A1 - Version 1 B-trees * */ int treeRead(struct READER *reader, struct DATAOBJECT *data) { int i, j, err, olen, elements, size, x, y, z, b, e, dy, dz, sx, sy, sz, dzy, szy; char *input, *output; uint8_t node_type, node_level; uint16_t entries_used; uint32_t size_of_chunk; uint32_t filter_mask; uint64_t address_of_left_sibling, address_of_right_sibling, start[4], child_pointer, key, store; char buf[4]; UNUSED(node_level); UNUSED(address_of_right_sibling); UNUSED(address_of_left_sibling); UNUSED(key); if (data->ds.dimensionality > 3) { log("TREE dimensions > 3"); return MYSOFA_INVALID_FORMAT; } /* read signature */ if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "TREE", 4)) { log("cannot read signature of TREE\n"); return MYSOFA_INVALID_FORMAT; } log("%08lX %.4s\n", (uint64_t )ftell(reader->fhd) - 4, buf); node_type = (uint8_t)fgetc(reader->fhd); node_level = (uint8_t)fgetc(reader->fhd); entries_used = (uint16_t)readValue(reader, 2); if(entries_used>0x1000) return MYSOFA_UNSUPPORTED_FORMAT; address_of_left_sibling = readValue(reader, reader->superblock.size_of_offsets); address_of_right_sibling = readValue(reader, reader->superblock.size_of_offsets); elements = 1; for (j = 0; j < data->ds.dimensionality; j++) elements *= data->datalayout_chunk[j]; dy = data->datalayout_chunk[1]; dz = data->datalayout_chunk[2]; sx = data->ds.dimension_size[0]; sy = data->ds.dimension_size[1]; sz = data->ds.dimension_size[2]; dzy = dz * dy; szy = sz * sy; size = data->datalayout_chunk[data->ds.dimensionality]; log("elements %d size %d\n",elements,size); if (!(output = malloc(elements * size))) { return MYSOFA_NO_MEMORY; } for (e = 0; e < entries_used * 2; e++) { if (node_type == 0) { key = readValue(reader, reader->superblock.size_of_lengths); } else { size_of_chunk = (uint32_t)readValue(reader, 4); filter_mask = (uint32_t)readValue(reader, 
4); if (filter_mask) { log("TREE all filters must be enabled\n"); free(output); return MYSOFA_INVALID_FORMAT; } for (j = 0; j < data->ds.dimensionality; j++) { start[j] = readValue(reader, 8); log("start %d %lu\n",j,start[j]); } if (readValue(reader, 8)) { break; } child_pointer = readValue(reader, reader->superblock.size_of_offsets); log(" data at %lX len %u\n", child_pointer, size_of_chunk); /* read data */ store = ftell(reader->fhd); if (fseek(reader->fhd, child_pointer, SEEK_SET)<0) { free(output); return errno; } if (!(input = malloc(size_of_chunk))) { free(output); return MYSOFA_NO_MEMORY; } if (fread(input, 1, size_of_chunk, reader->fhd) != size_of_chunk) { free(output); free(input); return MYSOFA_INVALID_FORMAT; } olen = elements * size; err = gunzip(size_of_chunk, input, &olen, output); free(input); log(" gunzip %d %d %d\n",err, olen, elements*size); if (err || olen != elements * size) { free(output); return MYSOFA_INVALID_FORMAT; } switch (data->ds.dimensionality) { case 1: for (i = 0; i < olen; i++) { b = i / elements; x = i % elements + start[0]; j = x * size + b; if (j>=0 && j < elements * size) { ((char*)data->data)[j] = output[i]; } } break; case 2: for (i = 0; i < olen; i++) { b = i / elements; x = i % elements; y = x % dy + start[1]; x = x / dy + start[0]; j = ((x * sy + y) * size) + b; if (j>=0 && j < elements * size) { ((char*)data->data)[j] = output[i]; } } break; case 3: for (i = 0; i < olen; i++) { b = i / elements; x = i % elements; z = x % dz + start[2]; y = (x / dz) % dy + start[1]; x = (x / dzy) + start[0]; j = (x * szy + y * sz + z) * size + b; if (j>=0 && j < elements * size) { ((char*)data->data)[j] = output[i]; } } break; default: log("invalid dim\n"); return MYSOFA_INTERNAL_ERROR; } if(fseek(reader->fhd, store, SEEK_SET)<0) { free(output); return errno; } } } free(output); if(fseek(reader->fhd, 4, SEEK_CUR)<0) /* skip checksum */ return errno; return MYSOFA_OK; }
./CrossVul/dataset_final_sorted/CWE-20/c/good_763_0
crossvul-cpp_data_good_365_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD CCCC M M % % D D C MM MM % % D D C M M M % % D D C M M % % DDDD CCCC M M % % % % % % Read DICOM Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" /* Dicom medical image declarations. 
*/ typedef struct _DicomInfo { const unsigned short group, element; const char *vr, *description; } DicomInfo; static const DicomInfo dicom_info[] = { { 0x0000, 0x0000, "UL", "Group Length" }, { 0x0000, 0x0001, "UL", "Command Length to End" }, { 0x0000, 0x0002, "UI", "Affected SOP Class UID" }, { 0x0000, 0x0003, "UI", "Requested SOP Class UID" }, { 0x0000, 0x0010, "LO", "Command Recognition Code" }, { 0x0000, 0x0100, "US", "Command Field" }, { 0x0000, 0x0110, "US", "Message ID" }, { 0x0000, 0x0120, "US", "Message ID Being Responded To" }, { 0x0000, 0x0200, "AE", "Initiator" }, { 0x0000, 0x0300, "AE", "Receiver" }, { 0x0000, 0x0400, "AE", "Find Location" }, { 0x0000, 0x0600, "AE", "Move Destination" }, { 0x0000, 0x0700, "US", "Priority" }, { 0x0000, 0x0800, "US", "Data Set Type" }, { 0x0000, 0x0850, "US", "Number of Matches" }, { 0x0000, 0x0860, "US", "Response Sequence Number" }, { 0x0000, 0x0900, "US", "Status" }, { 0x0000, 0x0901, "AT", "Offending Element" }, { 0x0000, 0x0902, "LO", "Exception Comment" }, { 0x0000, 0x0903, "US", "Exception ID" }, { 0x0000, 0x1000, "UI", "Affected SOP Instance UID" }, { 0x0000, 0x1001, "UI", "Requested SOP Instance UID" }, { 0x0000, 0x1002, "US", "Event Type ID" }, { 0x0000, 0x1005, "AT", "Attribute Identifier List" }, { 0x0000, 0x1008, "US", "Action Type ID" }, { 0x0000, 0x1020, "US", "Number of Remaining Suboperations" }, { 0x0000, 0x1021, "US", "Number of Completed Suboperations" }, { 0x0000, 0x1022, "US", "Number of Failed Suboperations" }, { 0x0000, 0x1023, "US", "Number of Warning Suboperations" }, { 0x0000, 0x1030, "AE", "Move Originator Application Entity Title" }, { 0x0000, 0x1031, "US", "Move Originator Message ID" }, { 0x0000, 0x4000, "LO", "Dialog Receiver" }, { 0x0000, 0x4010, "LO", "Terminal Type" }, { 0x0000, 0x5010, "SH", "Message Set ID" }, { 0x0000, 0x5020, "SH", "End Message Set" }, { 0x0000, 0x5110, "LO", "Display Format" }, { 0x0000, 0x5120, "LO", "Page Position ID" }, { 0x0000, 0x5130, "LO", "Text Format ID" 
}, { 0x0000, 0x5140, "LO", "Normal Reverse" }, { 0x0000, 0x5150, "LO", "Add Gray Scale" }, { 0x0000, 0x5160, "LO", "Borders" }, { 0x0000, 0x5170, "IS", "Copies" }, { 0x0000, 0x5180, "LO", "OldMagnificationType" }, { 0x0000, 0x5190, "LO", "Erase" }, { 0x0000, 0x51a0, "LO", "Print" }, { 0x0000, 0x51b0, "US", "Overlays" }, { 0x0002, 0x0000, "UL", "Meta Element Group Length" }, { 0x0002, 0x0001, "OB", "File Meta Information Version" }, { 0x0002, 0x0002, "UI", "Media Storage SOP Class UID" }, { 0x0002, 0x0003, "UI", "Media Storage SOP Instance UID" }, { 0x0002, 0x0010, "UI", "Transfer Syntax UID" }, { 0x0002, 0x0012, "UI", "Implementation Class UID" }, { 0x0002, 0x0013, "SH", "Implementation Version Name" }, { 0x0002, 0x0016, "AE", "Source Application Entity Title" }, { 0x0002, 0x0100, "UI", "Private Information Creator UID" }, { 0x0002, 0x0102, "OB", "Private Information" }, { 0x0003, 0x0000, "US", "?" }, { 0x0003, 0x0008, "US", "ISI Command Field" }, { 0x0003, 0x0011, "US", "Attach ID Application Code" }, { 0x0003, 0x0012, "UL", "Attach ID Message Count" }, { 0x0003, 0x0013, "DA", "Attach ID Date" }, { 0x0003, 0x0014, "TM", "Attach ID Time" }, { 0x0003, 0x0020, "US", "Message Type" }, { 0x0003, 0x0030, "DA", "Max Waiting Date" }, { 0x0003, 0x0031, "TM", "Max Waiting Time" }, { 0x0004, 0x0000, "UL", "File Set Group Length" }, { 0x0004, 0x1130, "CS", "File Set ID" }, { 0x0004, 0x1141, "CS", "File Set Descriptor File ID" }, { 0x0004, 0x1142, "CS", "File Set Descriptor File Specific Character Set" }, { 0x0004, 0x1200, "UL", "Root Directory Entity First Directory Record Offset" }, { 0x0004, 0x1202, "UL", "Root Directory Entity Last Directory Record Offset" }, { 0x0004, 0x1212, "US", "File Set Consistency Flag" }, { 0x0004, 0x1220, "SQ", "Directory Record Sequence" }, { 0x0004, 0x1400, "UL", "Next Directory Record Offset" }, { 0x0004, 0x1410, "US", "Record In Use Flag" }, { 0x0004, 0x1420, "UL", "Referenced Lower Level Directory Entity Offset" }, { 0x0004, 0x1430, "CS", 
"Directory Record Type" }, { 0x0004, 0x1432, "UI", "Private Record UID" }, { 0x0004, 0x1500, "CS", "Referenced File ID" }, { 0x0004, 0x1504, "UL", "MRDR Directory Record Offset" }, { 0x0004, 0x1510, "UI", "Referenced SOP Class UID In File" }, { 0x0004, 0x1511, "UI", "Referenced SOP Instance UID In File" }, { 0x0004, 0x1512, "UI", "Referenced Transfer Syntax UID In File" }, { 0x0004, 0x1600, "UL", "Number of References" }, { 0x0005, 0x0000, "US", "?" }, { 0x0006, 0x0000, "US", "?" }, { 0x0008, 0x0000, "UL", "Identifying Group Length" }, { 0x0008, 0x0001, "UL", "Length to End" }, { 0x0008, 0x0005, "CS", "Specific Character Set" }, { 0x0008, 0x0008, "CS", "Image Type" }, { 0x0008, 0x0010, "LO", "Recognition Code" }, { 0x0008, 0x0012, "DA", "Instance Creation Date" }, { 0x0008, 0x0013, "TM", "Instance Creation Time" }, { 0x0008, 0x0014, "UI", "Instance Creator UID" }, { 0x0008, 0x0016, "UI", "SOP Class UID" }, { 0x0008, 0x0018, "UI", "SOP Instance UID" }, { 0x0008, 0x0020, "DA", "Study Date" }, { 0x0008, 0x0021, "DA", "Series Date" }, { 0x0008, 0x0022, "DA", "Acquisition Date" }, { 0x0008, 0x0023, "DA", "Image Date" }, { 0x0008, 0x0024, "DA", "Overlay Date" }, { 0x0008, 0x0025, "DA", "Curve Date" }, { 0x0008, 0x002A, "DT", "Acquisition DateTime" }, { 0x0008, 0x0030, "TM", "Study Time" }, { 0x0008, 0x0031, "TM", "Series Time" }, { 0x0008, 0x0032, "TM", "Acquisition Time" }, { 0x0008, 0x0033, "TM", "Image Time" }, { 0x0008, 0x0034, "TM", "Overlay Time" }, { 0x0008, 0x0035, "TM", "Curve Time" }, { 0x0008, 0x0040, "xs", "Old Data Set Type" }, { 0x0008, 0x0041, "xs", "Old Data Set Subtype" }, { 0x0008, 0x0042, "CS", "Nuclear Medicine Series Type" }, { 0x0008, 0x0050, "SH", "Accession Number" }, { 0x0008, 0x0052, "CS", "Query/Retrieve Level" }, { 0x0008, 0x0054, "AE", "Retrieve AE Title" }, { 0x0008, 0x0058, "UI", "Failed SOP Instance UID List" }, { 0x0008, 0x0060, "CS", "Modality" }, { 0x0008, 0x0062, "SQ", "Modality Subtype" }, { 0x0008, 0x0064, "CS", "Conversion Type" }, 
{ 0x0008, 0x0068, "CS", "Presentation Intent Type" }, { 0x0008, 0x0070, "LO", "Manufacturer" }, { 0x0008, 0x0080, "LO", "Institution Name" }, { 0x0008, 0x0081, "ST", "Institution Address" }, { 0x0008, 0x0082, "SQ", "Institution Code Sequence" }, { 0x0008, 0x0090, "PN", "Referring Physician's Name" }, { 0x0008, 0x0092, "ST", "Referring Physician's Address" }, { 0x0008, 0x0094, "SH", "Referring Physician's Telephone Numbers" }, { 0x0008, 0x0100, "SH", "Code Value" }, { 0x0008, 0x0102, "SH", "Coding Scheme Designator" }, { 0x0008, 0x0103, "SH", "Coding Scheme Version" }, { 0x0008, 0x0104, "LO", "Code Meaning" }, { 0x0008, 0x0105, "CS", "Mapping Resource" }, { 0x0008, 0x0106, "DT", "Context Group Version" }, { 0x0008, 0x010b, "CS", "Code Set Extension Flag" }, { 0x0008, 0x010c, "UI", "Private Coding Scheme Creator UID" }, { 0x0008, 0x010d, "UI", "Code Set Extension Creator UID" }, { 0x0008, 0x010f, "CS", "Context Identifier" }, { 0x0008, 0x1000, "LT", "Network ID" }, { 0x0008, 0x1010, "SH", "Station Name" }, { 0x0008, 0x1030, "LO", "Study Description" }, { 0x0008, 0x1032, "SQ", "Procedure Code Sequence" }, { 0x0008, 0x103e, "LO", "Series Description" }, { 0x0008, 0x1040, "LO", "Institutional Department Name" }, { 0x0008, 0x1048, "PN", "Physician of Record" }, { 0x0008, 0x1050, "PN", "Performing Physician's Name" }, { 0x0008, 0x1060, "PN", "Name of Physician(s) Reading Study" }, { 0x0008, 0x1070, "PN", "Operator's Name" }, { 0x0008, 0x1080, "LO", "Admitting Diagnosis Description" }, { 0x0008, 0x1084, "SQ", "Admitting Diagnosis Code Sequence" }, { 0x0008, 0x1090, "LO", "Manufacturer's Model Name" }, { 0x0008, 0x1100, "SQ", "Referenced Results Sequence" }, { 0x0008, 0x1110, "SQ", "Referenced Study Sequence" }, { 0x0008, 0x1111, "SQ", "Referenced Study Component Sequence" }, { 0x0008, 0x1115, "SQ", "Referenced Series Sequence" }, { 0x0008, 0x1120, "SQ", "Referenced Patient Sequence" }, { 0x0008, 0x1125, "SQ", "Referenced Visit Sequence" }, { 0x0008, 0x1130, "SQ", 
"Referenced Overlay Sequence" }, { 0x0008, 0x1140, "SQ", "Referenced Image Sequence" }, { 0x0008, 0x1145, "SQ", "Referenced Curve Sequence" }, { 0x0008, 0x1148, "SQ", "Referenced Previous Waveform" }, { 0x0008, 0x114a, "SQ", "Referenced Simultaneous Waveforms" }, { 0x0008, 0x114c, "SQ", "Referenced Subsequent Waveform" }, { 0x0008, 0x1150, "UI", "Referenced SOP Class UID" }, { 0x0008, 0x1155, "UI", "Referenced SOP Instance UID" }, { 0x0008, 0x1160, "IS", "Referenced Frame Number" }, { 0x0008, 0x1195, "UI", "Transaction UID" }, { 0x0008, 0x1197, "US", "Failure Reason" }, { 0x0008, 0x1198, "SQ", "Failed SOP Sequence" }, { 0x0008, 0x1199, "SQ", "Referenced SOP Sequence" }, { 0x0008, 0x2110, "CS", "Old Lossy Image Compression" }, { 0x0008, 0x2111, "ST", "Derivation Description" }, { 0x0008, 0x2112, "SQ", "Source Image Sequence" }, { 0x0008, 0x2120, "SH", "Stage Name" }, { 0x0008, 0x2122, "IS", "Stage Number" }, { 0x0008, 0x2124, "IS", "Number of Stages" }, { 0x0008, 0x2128, "IS", "View Number" }, { 0x0008, 0x2129, "IS", "Number of Event Timers" }, { 0x0008, 0x212a, "IS", "Number of Views in Stage" }, { 0x0008, 0x2130, "DS", "Event Elapsed Time(s)" }, { 0x0008, 0x2132, "LO", "Event Timer Name(s)" }, { 0x0008, 0x2142, "IS", "Start Trim" }, { 0x0008, 0x2143, "IS", "Stop Trim" }, { 0x0008, 0x2144, "IS", "Recommended Display Frame Rate" }, { 0x0008, 0x2200, "CS", "Transducer Position" }, { 0x0008, 0x2204, "CS", "Transducer Orientation" }, { 0x0008, 0x2208, "CS", "Anatomic Structure" }, { 0x0008, 0x2218, "SQ", "Anatomic Region Sequence" }, { 0x0008, 0x2220, "SQ", "Anatomic Region Modifier Sequence" }, { 0x0008, 0x2228, "SQ", "Primary Anatomic Structure Sequence" }, { 0x0008, 0x2230, "SQ", "Primary Anatomic Structure Modifier Sequence" }, { 0x0008, 0x2240, "SQ", "Transducer Position Sequence" }, { 0x0008, 0x2242, "SQ", "Transducer Position Modifier Sequence" }, { 0x0008, 0x2244, "SQ", "Transducer Orientation Sequence" }, { 0x0008, 0x2246, "SQ", "Transducer Orientation 
Modifier Sequence" }, { 0x0008, 0x2251, "SQ", "Anatomic Structure Space Or Region Code Sequence" }, { 0x0008, 0x2253, "SQ", "Anatomic Portal Of Entrance Code Sequence" }, { 0x0008, 0x2255, "SQ", "Anatomic Approach Direction Code Sequence" }, { 0x0008, 0x2256, "ST", "Anatomic Perspective Description" }, { 0x0008, 0x2257, "SQ", "Anatomic Perspective Code Sequence" }, { 0x0008, 0x2258, "ST", "Anatomic Location Of Examining Instrument Description" }, { 0x0008, 0x2259, "SQ", "Anatomic Location Of Examining Instrument Code Sequence" }, { 0x0008, 0x225a, "SQ", "Anatomic Structure Space Or Region Modifier Code Sequence" }, { 0x0008, 0x225c, "SQ", "OnAxis Background Anatomic Structure Code Sequence" }, { 0x0008, 0x4000, "LT", "Identifying Comments" }, { 0x0009, 0x0000, "xs", "?" }, { 0x0009, 0x0001, "xs", "?" }, { 0x0009, 0x0002, "xs", "?" }, { 0x0009, 0x0003, "xs", "?" }, { 0x0009, 0x0004, "xs", "?" }, { 0x0009, 0x0005, "UN", "?" }, { 0x0009, 0x0006, "UN", "?" }, { 0x0009, 0x0007, "UN", "?" }, { 0x0009, 0x0008, "xs", "?" }, { 0x0009, 0x0009, "LT", "?" }, { 0x0009, 0x000a, "IS", "?" }, { 0x0009, 0x000b, "IS", "?" }, { 0x0009, 0x000c, "IS", "?" }, { 0x0009, 0x000d, "IS", "?" }, { 0x0009, 0x000e, "IS", "?" }, { 0x0009, 0x000f, "UN", "?" }, { 0x0009, 0x0010, "xs", "?" }, { 0x0009, 0x0011, "xs", "?" }, { 0x0009, 0x0012, "xs", "?" }, { 0x0009, 0x0013, "xs", "?" }, { 0x0009, 0x0014, "xs", "?" }, { 0x0009, 0x0015, "xs", "?" }, { 0x0009, 0x0016, "xs", "?" }, { 0x0009, 0x0017, "LT", "?" }, { 0x0009, 0x0018, "LT", "Data Set Identifier" }, { 0x0009, 0x001a, "US", "?" }, { 0x0009, 0x001e, "UI", "?" }, { 0x0009, 0x0020, "xs", "?" }, { 0x0009, 0x0021, "xs", "?" }, { 0x0009, 0x0022, "SH", "User Orientation" }, { 0x0009, 0x0023, "SL", "Initiation Type" }, { 0x0009, 0x0024, "xs", "?" }, { 0x0009, 0x0025, "xs", "?" }, { 0x0009, 0x0026, "xs", "?" }, { 0x0009, 0x0027, "xs", "?" }, { 0x0009, 0x0029, "xs", "?" }, { 0x0009, 0x002a, "SL", "?" 
}, { 0x0009, 0x002c, "LO", "Series Comments" }, { 0x0009, 0x002d, "SL", "Track Beat Average" }, { 0x0009, 0x002e, "FD", "Distance Prescribed" }, { 0x0009, 0x002f, "LT", "?" }, { 0x0009, 0x0030, "xs", "?" }, { 0x0009, 0x0031, "xs", "?" }, { 0x0009, 0x0032, "LT", "?" }, { 0x0009, 0x0034, "xs", "?" }, { 0x0009, 0x0035, "SL", "Gantry Locus Type" }, { 0x0009, 0x0037, "SL", "Starting Heart Rate" }, { 0x0009, 0x0038, "xs", "?" }, { 0x0009, 0x0039, "SL", "RR Window Offset" }, { 0x0009, 0x003a, "SL", "Percent Cycle Imaged" }, { 0x0009, 0x003e, "US", "?" }, { 0x0009, 0x003f, "US", "?" }, { 0x0009, 0x0040, "xs", "?" }, { 0x0009, 0x0041, "xs", "?" }, { 0x0009, 0x0042, "xs", "?" }, { 0x0009, 0x0043, "xs", "?" }, { 0x0009, 0x0050, "LT", "?" }, { 0x0009, 0x0051, "xs", "?" }, { 0x0009, 0x0060, "LT", "?" }, { 0x0009, 0x0061, "LT", "Series Unique Identifier" }, { 0x0009, 0x0070, "LT", "?" }, { 0x0009, 0x0080, "LT", "?" }, { 0x0009, 0x0091, "LT", "?" }, { 0x0009, 0x00e2, "LT", "?" }, { 0x0009, 0x00e3, "UI", "Equipment UID" }, { 0x0009, 0x00e6, "SH", "Genesis Version Now" }, { 0x0009, 0x00e7, "UL", "Exam Record Checksum" }, { 0x0009, 0x00e8, "UL", "?" }, { 0x0009, 0x00e9, "SL", "Actual Series Data Time Stamp" }, { 0x0009, 0x00f2, "UN", "?" }, { 0x0009, 0x00f3, "UN", "?" }, { 0x0009, 0x00f4, "LT", "?" }, { 0x0009, 0x00f5, "xs", "?" }, { 0x0009, 0x00f6, "LT", "PDM Data Object Type Extension" }, { 0x0009, 0x00f8, "US", "?" }, { 0x0009, 0x00fb, "IS", "?" }, { 0x0009, 0x1002, "OB", "?" }, { 0x0009, 0x1003, "OB", "?" }, { 0x0009, 0x1010, "UN", "?" 
}, { 0x0010, 0x0000, "UL", "Patient Group Length" }, { 0x0010, 0x0010, "PN", "Patient's Name" }, { 0x0010, 0x0020, "LO", "Patient's ID" }, { 0x0010, 0x0021, "LO", "Issuer of Patient's ID" }, { 0x0010, 0x0030, "DA", "Patient's Birth Date" }, { 0x0010, 0x0032, "TM", "Patient's Birth Time" }, { 0x0010, 0x0040, "CS", "Patient's Sex" }, { 0x0010, 0x0050, "SQ", "Patient's Insurance Plan Code Sequence" }, { 0x0010, 0x1000, "LO", "Other Patient's ID's" }, { 0x0010, 0x1001, "PN", "Other Patient's Names" }, { 0x0010, 0x1005, "PN", "Patient's Birth Name" }, { 0x0010, 0x1010, "AS", "Patient's Age" }, { 0x0010, 0x1020, "DS", "Patient's Size" }, { 0x0010, 0x1030, "DS", "Patient's Weight" }, { 0x0010, 0x1040, "LO", "Patient's Address" }, { 0x0010, 0x1050, "LT", "Insurance Plan Identification" }, { 0x0010, 0x1060, "PN", "Patient's Mother's Birth Name" }, { 0x0010, 0x1080, "LO", "Military Rank" }, { 0x0010, 0x1081, "LO", "Branch of Service" }, { 0x0010, 0x1090, "LO", "Medical Record Locator" }, { 0x0010, 0x2000, "LO", "Medical Alerts" }, { 0x0010, 0x2110, "LO", "Contrast Allergies" }, { 0x0010, 0x2150, "LO", "Country of Residence" }, { 0x0010, 0x2152, "LO", "Region of Residence" }, { 0x0010, 0x2154, "SH", "Patients Telephone Numbers" }, { 0x0010, 0x2160, "SH", "Ethnic Group" }, { 0x0010, 0x2180, "SH", "Occupation" }, { 0x0010, 0x21a0, "CS", "Smoking Status" }, { 0x0010, 0x21b0, "LT", "Additional Patient History" }, { 0x0010, 0x21c0, "US", "Pregnancy Status" }, { 0x0010, 0x21d0, "DA", "Last Menstrual Date" }, { 0x0010, 0x21f0, "LO", "Patients Religious Preference" }, { 0x0010, 0x4000, "LT", "Patient Comments" }, { 0x0011, 0x0001, "xs", "?" }, { 0x0011, 0x0002, "US", "?" }, { 0x0011, 0x0003, "LT", "Patient UID" }, { 0x0011, 0x0004, "LT", "Patient ID" }, { 0x0011, 0x000a, "xs", "?" }, { 0x0011, 0x000b, "SL", "Effective Series Duration" }, { 0x0011, 0x000c, "SL", "Num Beats" }, { 0x0011, 0x000d, "LO", "Radio Nuclide Name" }, { 0x0011, 0x0010, "xs", "?" }, { 0x0011, 0x0011, "xs", "?" 
}, { 0x0011, 0x0012, "LO", "Dataset Name" }, { 0x0011, 0x0013, "LO", "Dataset Type" }, { 0x0011, 0x0015, "xs", "?" }, { 0x0011, 0x0016, "SL", "Energy Number" }, { 0x0011, 0x0017, "SL", "RR Interval Window Number" }, { 0x0011, 0x0018, "SL", "MG Bin Number" }, { 0x0011, 0x0019, "FD", "Radius Of Rotation" }, { 0x0011, 0x001a, "SL", "Detector Count Zone" }, { 0x0011, 0x001b, "SL", "Num Energy Windows" }, { 0x0011, 0x001c, "SL", "Energy Offset" }, { 0x0011, 0x001d, "SL", "Energy Range" }, { 0x0011, 0x001f, "SL", "Image Orientation" }, { 0x0011, 0x0020, "xs", "?" }, { 0x0011, 0x0021, "xs", "?" }, { 0x0011, 0x0022, "xs", "?" }, { 0x0011, 0x0023, "xs", "?" }, { 0x0011, 0x0024, "SL", "FOV Mask Y Cutoff Angle" }, { 0x0011, 0x0025, "xs", "?" }, { 0x0011, 0x0026, "SL", "Table Orientation" }, { 0x0011, 0x0027, "SL", "ROI Top Left" }, { 0x0011, 0x0028, "SL", "ROI Bottom Right" }, { 0x0011, 0x0030, "xs", "?" }, { 0x0011, 0x0031, "xs", "?" }, { 0x0011, 0x0032, "UN", "?" }, { 0x0011, 0x0033, "LO", "Energy Correct Name" }, { 0x0011, 0x0034, "LO", "Spatial Correct Name" }, { 0x0011, 0x0035, "xs", "?" }, { 0x0011, 0x0036, "LO", "Uniformity Correct Name" }, { 0x0011, 0x0037, "LO", "Acquisition Specific Correct Name" }, { 0x0011, 0x0038, "SL", "Byte Order" }, { 0x0011, 0x003a, "SL", "Picture Format" }, { 0x0011, 0x003b, "FD", "Pixel Scale" }, { 0x0011, 0x003c, "FD", "Pixel Offset" }, { 0x0011, 0x003e, "SL", "FOV Shape" }, { 0x0011, 0x003f, "SL", "Dataset Flags" }, { 0x0011, 0x0040, "xs", "?" 
}, { 0x0011, 0x0041, "LT", "Medical Alerts" }, { 0x0011, 0x0042, "LT", "Contrast Allergies" }, { 0x0011, 0x0044, "FD", "Threshold Center" }, { 0x0011, 0x0045, "FD", "Threshold Width" }, { 0x0011, 0x0046, "SL", "Interpolation Type" }, { 0x0011, 0x0055, "FD", "Period" }, { 0x0011, 0x0056, "FD", "ElapsedTime" }, { 0x0011, 0x00a1, "DA", "Patient Registration Date" }, { 0x0011, 0x00a2, "TM", "Patient Registration Time" }, { 0x0011, 0x00b0, "LT", "Patient Last Name" }, { 0x0011, 0x00b2, "LT", "Patient First Name" }, { 0x0011, 0x00b4, "LT", "Patient Hospital Status" }, { 0x0011, 0x00bc, "TM", "Current Location Time" }, { 0x0011, 0x00c0, "LT", "Patient Insurance Status" }, { 0x0011, 0x00d0, "LT", "Patient Billing Type" }, { 0x0011, 0x00d2, "LT", "Patient Billing Address" }, { 0x0013, 0x0000, "LT", "Modifying Physician" }, { 0x0013, 0x0010, "xs", "?" }, { 0x0013, 0x0011, "SL", "?" }, { 0x0013, 0x0012, "xs", "?" }, { 0x0013, 0x0016, "SL", "AutoTrack Peak" }, { 0x0013, 0x0017, "SL", "AutoTrack Width" }, { 0x0013, 0x0018, "FD", "Transmission Scan Time" }, { 0x0013, 0x0019, "FD", "Transmission Mask Width" }, { 0x0013, 0x001a, "FD", "Copper Attenuator Thickness" }, { 0x0013, 0x001c, "FD", "?" }, { 0x0013, 0x001d, "FD", "?" 
}, { 0x0013, 0x001e, "FD", "Tomo View Offset" }, { 0x0013, 0x0020, "LT", "Patient Name" }, { 0x0013, 0x0022, "LT", "Patient Id" }, { 0x0013, 0x0026, "LT", "Study Comments" }, { 0x0013, 0x0030, "DA", "Patient Birthdate" }, { 0x0013, 0x0031, "DS", "Patient Weight" }, { 0x0013, 0x0032, "LT", "Patients Maiden Name" }, { 0x0013, 0x0033, "LT", "Referring Physician" }, { 0x0013, 0x0034, "LT", "Admitting Diagnosis" }, { 0x0013, 0x0035, "LT", "Patient Sex" }, { 0x0013, 0x0040, "LT", "Procedure Description" }, { 0x0013, 0x0042, "LT", "Patient Rest Direction" }, { 0x0013, 0x0044, "LT", "Patient Position" }, { 0x0013, 0x0046, "LT", "View Direction" }, { 0x0015, 0x0001, "DS", "Stenosis Calibration Ratio" }, { 0x0015, 0x0002, "DS", "Stenosis Magnification" }, { 0x0015, 0x0003, "DS", "Cardiac Calibration Ratio" }, { 0x0018, 0x0000, "UL", "Acquisition Group Length" }, { 0x0018, 0x0010, "LO", "Contrast/Bolus Agent" }, { 0x0018, 0x0012, "SQ", "Contrast/Bolus Agent Sequence" }, { 0x0018, 0x0014, "SQ", "Contrast/Bolus Administration Route Sequence" }, { 0x0018, 0x0015, "CS", "Body Part Examined" }, { 0x0018, 0x0020, "CS", "Scanning Sequence" }, { 0x0018, 0x0021, "CS", "Sequence Variant" }, { 0x0018, 0x0022, "CS", "Scan Options" }, { 0x0018, 0x0023, "CS", "MR Acquisition Type" }, { 0x0018, 0x0024, "SH", "Sequence Name" }, { 0x0018, 0x0025, "CS", "Angio Flag" }, { 0x0018, 0x0026, "SQ", "Intervention Drug Information Sequence" }, { 0x0018, 0x0027, "TM", "Intervention Drug Stop Time" }, { 0x0018, 0x0028, "DS", "Intervention Drug Dose" }, { 0x0018, 0x0029, "SQ", "Intervention Drug Code Sequence" }, { 0x0018, 0x002a, "SQ", "Additional Drug Sequence" }, { 0x0018, 0x0030, "LO", "Radionuclide" }, { 0x0018, 0x0031, "LO", "Radiopharmaceutical" }, { 0x0018, 0x0032, "DS", "Energy Window Centerline" }, { 0x0018, 0x0033, "DS", "Energy Window Total Width" }, { 0x0018, 0x0034, "LO", "Intervention Drug Name" }, { 0x0018, 0x0035, "TM", "Intervention Drug Start Time" }, { 0x0018, 0x0036, "SQ", 
"Intervention Therapy Sequence" }, { 0x0018, 0x0037, "CS", "Therapy Type" }, { 0x0018, 0x0038, "CS", "Intervention Status" }, { 0x0018, 0x0039, "CS", "Therapy Description" }, { 0x0018, 0x0040, "IS", "Cine Rate" }, { 0x0018, 0x0050, "DS", "Slice Thickness" }, { 0x0018, 0x0060, "DS", "KVP" }, { 0x0018, 0x0070, "IS", "Counts Accumulated" }, { 0x0018, 0x0071, "CS", "Acquisition Termination Condition" }, { 0x0018, 0x0072, "DS", "Effective Series Duration" }, { 0x0018, 0x0073, "CS", "Acquisition Start Condition" }, { 0x0018, 0x0074, "IS", "Acquisition Start Condition Data" }, { 0x0018, 0x0075, "IS", "Acquisition Termination Condition Data" }, { 0x0018, 0x0080, "DS", "Repetition Time" }, { 0x0018, 0x0081, "DS", "Echo Time" }, { 0x0018, 0x0082, "DS", "Inversion Time" }, { 0x0018, 0x0083, "DS", "Number of Averages" }, { 0x0018, 0x0084, "DS", "Imaging Frequency" }, { 0x0018, 0x0085, "SH", "Imaged Nucleus" }, { 0x0018, 0x0086, "IS", "Echo Number(s)" }, { 0x0018, 0x0087, "DS", "Magnetic Field Strength" }, { 0x0018, 0x0088, "DS", "Spacing Between Slices" }, { 0x0018, 0x0089, "IS", "Number of Phase Encoding Steps" }, { 0x0018, 0x0090, "DS", "Data Collection Diameter" }, { 0x0018, 0x0091, "IS", "Echo Train Length" }, { 0x0018, 0x0093, "DS", "Percent Sampling" }, { 0x0018, 0x0094, "DS", "Percent Phase Field of View" }, { 0x0018, 0x0095, "DS", "Pixel Bandwidth" }, { 0x0018, 0x1000, "LO", "Device Serial Number" }, { 0x0018, 0x1004, "LO", "Plate ID" }, { 0x0018, 0x1010, "LO", "Secondary Capture Device ID" }, { 0x0018, 0x1012, "DA", "Date of Secondary Capture" }, { 0x0018, 0x1014, "TM", "Time of Secondary Capture" }, { 0x0018, 0x1016, "LO", "Secondary Capture Device Manufacturer" }, { 0x0018, 0x1018, "LO", "Secondary Capture Device Manufacturer Model Name" }, { 0x0018, 0x1019, "LO", "Secondary Capture Device Software Version(s)" }, { 0x0018, 0x1020, "LO", "Software Version(s)" }, { 0x0018, 0x1022, "SH", "Video Image Format Acquired" }, { 0x0018, 0x1023, "LO", "Digital Image Format 
Acquired" }, { 0x0018, 0x1030, "LO", "Protocol Name" }, { 0x0018, 0x1040, "LO", "Contrast/Bolus Route" }, { 0x0018, 0x1041, "DS", "Contrast/Bolus Volume" }, { 0x0018, 0x1042, "TM", "Contrast/Bolus Start Time" }, { 0x0018, 0x1043, "TM", "Contrast/Bolus Stop Time" }, { 0x0018, 0x1044, "DS", "Contrast/Bolus Total Dose" }, { 0x0018, 0x1045, "IS", "Syringe Counts" }, { 0x0018, 0x1046, "DS", "Contrast Flow Rate" }, { 0x0018, 0x1047, "DS", "Contrast Flow Duration" }, { 0x0018, 0x1048, "CS", "Contrast/Bolus Ingredient" }, { 0x0018, 0x1049, "DS", "Contrast/Bolus Ingredient Concentration" }, { 0x0018, 0x1050, "DS", "Spatial Resolution" }, { 0x0018, 0x1060, "DS", "Trigger Time" }, { 0x0018, 0x1061, "LO", "Trigger Source or Type" }, { 0x0018, 0x1062, "IS", "Nominal Interval" }, { 0x0018, 0x1063, "DS", "Frame Time" }, { 0x0018, 0x1064, "LO", "Framing Type" }, { 0x0018, 0x1065, "DS", "Frame Time Vector" }, { 0x0018, 0x1066, "DS", "Frame Delay" }, { 0x0018, 0x1067, "DS", "Image Trigger Delay" }, { 0x0018, 0x1068, "DS", "Group Time Offset" }, { 0x0018, 0x1069, "DS", "Trigger Time Offset" }, { 0x0018, 0x106a, "CS", "Synchronization Trigger" }, { 0x0018, 0x106b, "UI", "Synchronization Frame of Reference" }, { 0x0018, 0x106e, "UL", "Trigger Sample Position" }, { 0x0018, 0x1070, "LO", "Radiopharmaceutical Route" }, { 0x0018, 0x1071, "DS", "Radiopharmaceutical Volume" }, { 0x0018, 0x1072, "TM", "Radiopharmaceutical Start Time" }, { 0x0018, 0x1073, "TM", "Radiopharmaceutical Stop Time" }, { 0x0018, 0x1074, "DS", "Radionuclide Total Dose" }, { 0x0018, 0x1075, "DS", "Radionuclide Half Life" }, { 0x0018, 0x1076, "DS", "Radionuclide Positron Fraction" }, { 0x0018, 0x1077, "DS", "Radiopharmaceutical Specific Activity" }, { 0x0018, 0x1080, "CS", "Beat Rejection Flag" }, { 0x0018, 0x1081, "IS", "Low R-R Value" }, { 0x0018, 0x1082, "IS", "High R-R Value" }, { 0x0018, 0x1083, "IS", "Intervals Acquired" }, { 0x0018, 0x1084, "IS", "Intervals Rejected" }, { 0x0018, 0x1085, "LO", "PVC Rejection" }, 
{ 0x0018, 0x1086, "IS", "Skip Beats" }, { 0x0018, 0x1088, "IS", "Heart Rate" }, { 0x0018, 0x1090, "IS", "Cardiac Number of Images" }, { 0x0018, 0x1094, "IS", "Trigger Window" }, { 0x0018, 0x1100, "DS", "Reconstruction Diameter" }, { 0x0018, 0x1110, "DS", "Distance Source to Detector" }, { 0x0018, 0x1111, "DS", "Distance Source to Patient" }, { 0x0018, 0x1114, "DS", "Estimated Radiographic Magnification Factor" }, { 0x0018, 0x1120, "DS", "Gantry/Detector Tilt" }, { 0x0018, 0x1121, "DS", "Gantry/Detector Slew" }, { 0x0018, 0x1130, "DS", "Table Height" }, { 0x0018, 0x1131, "DS", "Table Traverse" }, { 0x0018, 0x1134, "CS", "Table Motion" }, { 0x0018, 0x1135, "DS", "Table Vertical Increment" }, { 0x0018, 0x1136, "DS", "Table Lateral Increment" }, { 0x0018, 0x1137, "DS", "Table Longitudinal Increment" }, { 0x0018, 0x1138, "DS", "Table Angle" }, { 0x0018, 0x113a, "CS", "Table Type" }, { 0x0018, 0x1140, "CS", "Rotation Direction" }, { 0x0018, 0x1141, "DS", "Angular Position" }, { 0x0018, 0x1142, "DS", "Radial Position" }, { 0x0018, 0x1143, "DS", "Scan Arc" }, { 0x0018, 0x1144, "DS", "Angular Step" }, { 0x0018, 0x1145, "DS", "Center of Rotation Offset" }, { 0x0018, 0x1146, "DS", "Rotation Offset" }, { 0x0018, 0x1147, "CS", "Field of View Shape" }, { 0x0018, 0x1149, "IS", "Field of View Dimension(s)" }, { 0x0018, 0x1150, "IS", "Exposure Time" }, { 0x0018, 0x1151, "IS", "X-ray Tube Current" }, { 0x0018, 0x1152, "IS", "Exposure" }, { 0x0018, 0x1153, "IS", "Exposure in uAs" }, { 0x0018, 0x1154, "DS", "AveragePulseWidth" }, { 0x0018, 0x1155, "CS", "RadiationSetting" }, { 0x0018, 0x1156, "CS", "Rectification Type" }, { 0x0018, 0x115a, "CS", "RadiationMode" }, { 0x0018, 0x115e, "DS", "ImageAreaDoseProduct" }, { 0x0018, 0x1160, "SH", "Filter Type" }, { 0x0018, 0x1161, "LO", "TypeOfFilters" }, { 0x0018, 0x1162, "DS", "IntensifierSize" }, { 0x0018, 0x1164, "DS", "ImagerPixelSpacing" }, { 0x0018, 0x1166, "CS", "Grid" }, { 0x0018, 0x1170, "IS", "Generator Power" }, { 0x0018, 0x1180, 
"SH", "Collimator/Grid Name" }, { 0x0018, 0x1181, "CS", "Collimator Type" }, { 0x0018, 0x1182, "IS", "Focal Distance" }, { 0x0018, 0x1183, "DS", "X Focus Center" }, { 0x0018, 0x1184, "DS", "Y Focus Center" }, { 0x0018, 0x1190, "DS", "Focal Spot(s)" }, { 0x0018, 0x1191, "CS", "Anode Target Material" }, { 0x0018, 0x11a0, "DS", "Body Part Thickness" }, { 0x0018, 0x11a2, "DS", "Compression Force" }, { 0x0018, 0x1200, "DA", "Date of Last Calibration" }, { 0x0018, 0x1201, "TM", "Time of Last Calibration" }, { 0x0018, 0x1210, "SH", "Convolution Kernel" }, { 0x0018, 0x1240, "IS", "Upper/Lower Pixel Values" }, { 0x0018, 0x1242, "IS", "Actual Frame Duration" }, { 0x0018, 0x1243, "IS", "Count Rate" }, { 0x0018, 0x1244, "US", "Preferred Playback Sequencing" }, { 0x0018, 0x1250, "SH", "Receiving Coil" }, { 0x0018, 0x1251, "SH", "Transmitting Coil" }, { 0x0018, 0x1260, "SH", "Plate Type" }, { 0x0018, 0x1261, "LO", "Phosphor Type" }, { 0x0018, 0x1300, "DS", "Scan Velocity" }, { 0x0018, 0x1301, "CS", "Whole Body Technique" }, { 0x0018, 0x1302, "IS", "Scan Length" }, { 0x0018, 0x1310, "US", "Acquisition Matrix" }, { 0x0018, 0x1312, "CS", "Phase Encoding Direction" }, { 0x0018, 0x1314, "DS", "Flip Angle" }, { 0x0018, 0x1315, "CS", "Variable Flip Angle Flag" }, { 0x0018, 0x1316, "DS", "SAR" }, { 0x0018, 0x1318, "DS", "dB/dt" }, { 0x0018, 0x1400, "LO", "Acquisition Device Processing Description" }, { 0x0018, 0x1401, "LO", "Acquisition Device Processing Code" }, { 0x0018, 0x1402, "CS", "Cassette Orientation" }, { 0x0018, 0x1403, "CS", "Cassette Size" }, { 0x0018, 0x1404, "US", "Exposures on Plate" }, { 0x0018, 0x1405, "IS", "Relative X-ray Exposure" }, { 0x0018, 0x1450, "DS", "Column Angulation" }, { 0x0018, 0x1460, "DS", "Tomo Layer Height" }, { 0x0018, 0x1470, "DS", "Tomo Angle" }, { 0x0018, 0x1480, "DS", "Tomo Time" }, { 0x0018, 0x1490, "CS", "Tomo Type" }, { 0x0018, 0x1491, "CS", "Tomo Class" }, { 0x0018, 0x1495, "IS", "Number of Tomosynthesis Source Images" }, { 0x0018, 0x1500, 
"CS", "PositionerMotion" }, { 0x0018, 0x1508, "CS", "Positioner Type" }, { 0x0018, 0x1510, "DS", "PositionerPrimaryAngle" }, { 0x0018, 0x1511, "DS", "PositionerSecondaryAngle" }, { 0x0018, 0x1520, "DS", "PositionerPrimaryAngleIncrement" }, { 0x0018, 0x1521, "DS", "PositionerSecondaryAngleIncrement" }, { 0x0018, 0x1530, "DS", "DetectorPrimaryAngle" }, { 0x0018, 0x1531, "DS", "DetectorSecondaryAngle" }, { 0x0018, 0x1600, "CS", "Shutter Shape" }, { 0x0018, 0x1602, "IS", "Shutter Left Vertical Edge" }, { 0x0018, 0x1604, "IS", "Shutter Right Vertical Edge" }, { 0x0018, 0x1606, "IS", "Shutter Upper Horizontal Edge" }, { 0x0018, 0x1608, "IS", "Shutter Lower Horizonta lEdge" }, { 0x0018, 0x1610, "IS", "Center of Circular Shutter" }, { 0x0018, 0x1612, "IS", "Radius of Circular Shutter" }, { 0x0018, 0x1620, "IS", "Vertices of Polygonal Shutter" }, { 0x0018, 0x1622, "US", "Shutter Presentation Value" }, { 0x0018, 0x1623, "US", "Shutter Overlay Group" }, { 0x0018, 0x1700, "CS", "Collimator Shape" }, { 0x0018, 0x1702, "IS", "Collimator Left Vertical Edge" }, { 0x0018, 0x1704, "IS", "Collimator Right Vertical Edge" }, { 0x0018, 0x1706, "IS", "Collimator Upper Horizontal Edge" }, { 0x0018, 0x1708, "IS", "Collimator Lower Horizontal Edge" }, { 0x0018, 0x1710, "IS", "Center of Circular Collimator" }, { 0x0018, 0x1712, "IS", "Radius of Circular Collimator" }, { 0x0018, 0x1720, "IS", "Vertices of Polygonal Collimator" }, { 0x0018, 0x1800, "CS", "Acquisition Time Synchronized" }, { 0x0018, 0x1801, "SH", "Time Source" }, { 0x0018, 0x1802, "CS", "Time Distribution Protocol" }, { 0x0018, 0x4000, "LT", "Acquisition Comments" }, { 0x0018, 0x5000, "SH", "Output Power" }, { 0x0018, 0x5010, "LO", "Transducer Data" }, { 0x0018, 0x5012, "DS", "Focus Depth" }, { 0x0018, 0x5020, "LO", "Processing Function" }, { 0x0018, 0x5021, "LO", "Postprocessing Function" }, { 0x0018, 0x5022, "DS", "Mechanical Index" }, { 0x0018, 0x5024, "DS", "Thermal Index" }, { 0x0018, 0x5026, "DS", "Cranial Thermal Index" 
}, { 0x0018, 0x5027, "DS", "Soft Tissue Thermal Index" }, { 0x0018, 0x5028, "DS", "Soft Tissue-Focus Thermal Index" }, { 0x0018, 0x5029, "DS", "Soft Tissue-Surface Thermal Index" }, { 0x0018, 0x5030, "DS", "Dynamic Range" }, { 0x0018, 0x5040, "DS", "Total Gain" }, { 0x0018, 0x5050, "IS", "Depth of Scan Field" }, { 0x0018, 0x5100, "CS", "Patient Position" }, { 0x0018, 0x5101, "CS", "View Position" }, { 0x0018, 0x5104, "SQ", "Projection Eponymous Name Code Sequence" }, { 0x0018, 0x5210, "DS", "Image Transformation Matrix" }, { 0x0018, 0x5212, "DS", "Image Translation Vector" }, { 0x0018, 0x6000, "DS", "Sensitivity" }, { 0x0018, 0x6011, "IS", "Sequence of Ultrasound Regions" }, { 0x0018, 0x6012, "US", "Region Spatial Format" }, { 0x0018, 0x6014, "US", "Region Data Type" }, { 0x0018, 0x6016, "UL", "Region Flags" }, { 0x0018, 0x6018, "UL", "Region Location Min X0" }, { 0x0018, 0x601a, "UL", "Region Location Min Y0" }, { 0x0018, 0x601c, "UL", "Region Location Max X1" }, { 0x0018, 0x601e, "UL", "Region Location Max Y1" }, { 0x0018, 0x6020, "SL", "Reference Pixel X0" }, { 0x0018, 0x6022, "SL", "Reference Pixel Y0" }, { 0x0018, 0x6024, "US", "Physical Units X Direction" }, { 0x0018, 0x6026, "US", "Physical Units Y Direction" }, { 0x0018, 0x6028, "FD", "Reference Pixel Physical Value X" }, { 0x0018, 0x602a, "US", "Reference Pixel Physical Value Y" }, { 0x0018, 0x602c, "US", "Physical Delta X" }, { 0x0018, 0x602e, "US", "Physical Delta Y" }, { 0x0018, 0x6030, "UL", "Transducer Frequency" }, { 0x0018, 0x6031, "CS", "Transducer Type" }, { 0x0018, 0x6032, "UL", "Pulse Repetition Frequency" }, { 0x0018, 0x6034, "FD", "Doppler Correction Angle" }, { 0x0018, 0x6036, "FD", "Steering Angle" }, { 0x0018, 0x6038, "UL", "Doppler Sample Volume X Position" }, { 0x0018, 0x603a, "UL", "Doppler Sample Volume Y Position" }, { 0x0018, 0x603c, "UL", "TM-Line Position X0" }, { 0x0018, 0x603e, "UL", "TM-Line Position Y0" }, { 0x0018, 0x6040, "UL", "TM-Line Position X1" }, { 0x0018, 0x6042, "UL", 
"TM-Line Position Y1" }, { 0x0018, 0x6044, "US", "Pixel Component Organization" }, { 0x0018, 0x6046, "UL", "Pixel Component Mask" }, { 0x0018, 0x6048, "UL", "Pixel Component Range Start" }, { 0x0018, 0x604a, "UL", "Pixel Component Range Stop" }, { 0x0018, 0x604c, "US", "Pixel Component Physical Units" }, { 0x0018, 0x604e, "US", "Pixel Component Data Type" }, { 0x0018, 0x6050, "UL", "Number of Table Break Points" }, { 0x0018, 0x6052, "UL", "Table of X Break Points" }, { 0x0018, 0x6054, "FD", "Table of Y Break Points" }, { 0x0018, 0x6056, "UL", "Number of Table Entries" }, { 0x0018, 0x6058, "UL", "Table of Pixel Values" }, { 0x0018, 0x605a, "FL", "Table of Parameter Values" }, { 0x0018, 0x7000, "CS", "Detector Conditions Nominal Flag" }, { 0x0018, 0x7001, "DS", "Detector Temperature" }, { 0x0018, 0x7004, "CS", "Detector Type" }, { 0x0018, 0x7005, "CS", "Detector Configuration" }, { 0x0018, 0x7006, "LT", "Detector Description" }, { 0x0018, 0x7008, "LT", "Detector Mode" }, { 0x0018, 0x700a, "SH", "Detector ID" }, { 0x0018, 0x700c, "DA", "Date of Last Detector Calibration " }, { 0x0018, 0x700e, "TM", "Time of Last Detector Calibration" }, { 0x0018, 0x7010, "IS", "Exposures on Detector Since Last Calibration" }, { 0x0018, 0x7011, "IS", "Exposures on Detector Since Manufactured" }, { 0x0018, 0x7012, "DS", "Detector Time Since Last Exposure" }, { 0x0018, 0x7014, "DS", "Detector Active Time" }, { 0x0018, 0x7016, "DS", "Detector Activation Offset From Exposure" }, { 0x0018, 0x701a, "DS", "Detector Binning" }, { 0x0018, 0x7020, "DS", "Detector Element Physical Size" }, { 0x0018, 0x7022, "DS", "Detector Element Spacing" }, { 0x0018, 0x7024, "CS", "Detector Active Shape" }, { 0x0018, 0x7026, "DS", "Detector Active Dimensions" }, { 0x0018, 0x7028, "DS", "Detector Active Origin" }, { 0x0018, 0x7030, "DS", "Field of View Origin" }, { 0x0018, 0x7032, "DS", "Field of View Rotation" }, { 0x0018, 0x7034, "CS", "Field of View Horizontal Flip" }, { 0x0018, 0x7040, "LT", "Grid Absorbing 
Material" }, { 0x0018, 0x7041, "LT", "Grid Spacing Material" }, { 0x0018, 0x7042, "DS", "Grid Thickness" }, { 0x0018, 0x7044, "DS", "Grid Pitch" }, { 0x0018, 0x7046, "IS", "Grid Aspect Ratio" }, { 0x0018, 0x7048, "DS", "Grid Period" }, { 0x0018, 0x704c, "DS", "Grid Focal Distance" }, { 0x0018, 0x7050, "LT", "Filter Material" }, { 0x0018, 0x7052, "DS", "Filter Thickness Minimum" }, { 0x0018, 0x7054, "DS", "Filter Thickness Maximum" }, { 0x0018, 0x7060, "CS", "Exposure Control Mode" }, { 0x0018, 0x7062, "LT", "Exposure Control Mode Description" }, { 0x0018, 0x7064, "CS", "Exposure Status" }, { 0x0018, 0x7065, "DS", "Phototimer Setting" }, { 0x0019, 0x0000, "xs", "?" }, { 0x0019, 0x0001, "xs", "?" }, { 0x0019, 0x0002, "xs", "?" }, { 0x0019, 0x0003, "xs", "?" }, { 0x0019, 0x0004, "xs", "?" }, { 0x0019, 0x0005, "xs", "?" }, { 0x0019, 0x0006, "xs", "?" }, { 0x0019, 0x0007, "xs", "?" }, { 0x0019, 0x0008, "xs", "?" }, { 0x0019, 0x0009, "xs", "?" }, { 0x0019, 0x000a, "xs", "?" }, { 0x0019, 0x000b, "DS", "?" }, { 0x0019, 0x000c, "US", "?" }, { 0x0019, 0x000d, "TM", "Time" }, { 0x0019, 0x000e, "xs", "?" }, { 0x0019, 0x000f, "DS", "Horizontal Frame Of Reference" }, { 0x0019, 0x0010, "xs", "?" }, { 0x0019, 0x0011, "xs", "?" }, { 0x0019, 0x0012, "xs", "?" }, { 0x0019, 0x0013, "xs", "?" }, { 0x0019, 0x0014, "xs", "?" }, { 0x0019, 0x0015, "xs", "?" }, { 0x0019, 0x0016, "xs", "?" }, { 0x0019, 0x0017, "xs", "?" }, { 0x0019, 0x0018, "xs", "?" }, { 0x0019, 0x0019, "xs", "?" }, { 0x0019, 0x001a, "xs", "?" }, { 0x0019, 0x001b, "xs", "?" }, { 0x0019, 0x001c, "CS", "Dose" }, { 0x0019, 0x001d, "IS", "Side Mark" }, { 0x0019, 0x001e, "xs", "?" }, { 0x0019, 0x001f, "DS", "Exposure Duration" }, { 0x0019, 0x0020, "xs", "?" }, { 0x0019, 0x0021, "xs", "?" }, { 0x0019, 0x0022, "xs", "?" }, { 0x0019, 0x0023, "xs", "?" }, { 0x0019, 0x0024, "xs", "?" }, { 0x0019, 0x0025, "xs", "?" }, { 0x0019, 0x0026, "xs", "?" }, { 0x0019, 0x0027, "xs", "?" }, { 0x0019, 0x0028, "xs", "?" 
}, { 0x0019, 0x0029, "IS", "?" }, { 0x0019, 0x002a, "xs", "?" }, { 0x0019, 0x002b, "DS", "Xray Off Position" }, { 0x0019, 0x002c, "xs", "?" }, { 0x0019, 0x002d, "US", "?" }, { 0x0019, 0x002e, "xs", "?" }, { 0x0019, 0x002f, "DS", "Trigger Frequency" }, { 0x0019, 0x0030, "xs", "?" }, { 0x0019, 0x0031, "xs", "?" }, { 0x0019, 0x0032, "xs", "?" }, { 0x0019, 0x0033, "UN", "ECG 2 Offset 2" }, { 0x0019, 0x0034, "US", "?" }, { 0x0019, 0x0036, "US", "?" }, { 0x0019, 0x0038, "US", "?" }, { 0x0019, 0x0039, "xs", "?" }, { 0x0019, 0x003a, "xs", "?" }, { 0x0019, 0x003b, "LT", "?" }, { 0x0019, 0x003c, "xs", "?" }, { 0x0019, 0x003e, "xs", "?" }, { 0x0019, 0x003f, "UN", "?" }, { 0x0019, 0x0040, "xs", "?" }, { 0x0019, 0x0041, "xs", "?" }, { 0x0019, 0x0042, "xs", "?" }, { 0x0019, 0x0043, "xs", "?" }, { 0x0019, 0x0044, "xs", "?" }, { 0x0019, 0x0045, "xs", "?" }, { 0x0019, 0x0046, "xs", "?" }, { 0x0019, 0x0047, "xs", "?" }, { 0x0019, 0x0048, "xs", "?" }, { 0x0019, 0x0049, "US", "?" }, { 0x0019, 0x004a, "xs", "?" }, { 0x0019, 0x004b, "SL", "Data Size For Scan Data" }, { 0x0019, 0x004c, "US", "?" }, { 0x0019, 0x004e, "US", "?" }, { 0x0019, 0x0050, "xs", "?" }, { 0x0019, 0x0051, "xs", "?" }, { 0x0019, 0x0052, "xs", "?" }, { 0x0019, 0x0053, "LT", "Barcode" }, { 0x0019, 0x0054, "xs", "?" }, { 0x0019, 0x0055, "DS", "Receiver Reference Gain" }, { 0x0019, 0x0056, "xs", "?" }, { 0x0019, 0x0057, "SS", "CT Water Number" }, { 0x0019, 0x0058, "xs", "?" }, { 0x0019, 0x005a, "xs", "?" }, { 0x0019, 0x005c, "xs", "?" }, { 0x0019, 0x005d, "US", "?" }, { 0x0019, 0x005e, "xs", "?" }, { 0x0019, 0x005f, "SL", "Increment Between Channels" }, { 0x0019, 0x0060, "xs", "?" }, { 0x0019, 0x0061, "xs", "?" }, { 0x0019, 0x0062, "xs", "?" }, { 0x0019, 0x0063, "xs", "?" }, { 0x0019, 0x0064, "xs", "?" }, { 0x0019, 0x0065, "xs", "?" }, { 0x0019, 0x0066, "xs", "?" }, { 0x0019, 0x0067, "xs", "?" }, { 0x0019, 0x0068, "xs", "?" }, { 0x0019, 0x0069, "UL", "Convolution Mode" }, { 0x0019, 0x006a, "xs", "?" 
}, { 0x0019, 0x006b, "SS", "Field Of View In Detector Cells" }, { 0x0019, 0x006c, "US", "?" }, { 0x0019, 0x006e, "US", "?" }, { 0x0019, 0x0070, "xs", "?" }, { 0x0019, 0x0071, "xs", "?" }, { 0x0019, 0x0072, "xs", "?" }, { 0x0019, 0x0073, "xs", "?" }, { 0x0019, 0x0074, "xs", "?" }, { 0x0019, 0x0075, "xs", "?" }, { 0x0019, 0x0076, "xs", "?" }, { 0x0019, 0x0077, "US", "?" }, { 0x0019, 0x0078, "US", "?" }, { 0x0019, 0x007a, "US", "?" }, { 0x0019, 0x007c, "US", "?" }, { 0x0019, 0x007d, "DS", "Second Echo" }, { 0x0019, 0x007e, "xs", "?" }, { 0x0019, 0x007f, "DS", "Table Delta" }, { 0x0019, 0x0080, "xs", "?" }, { 0x0019, 0x0081, "xs", "?" }, { 0x0019, 0x0082, "xs", "?" }, { 0x0019, 0x0083, "xs", "?" }, { 0x0019, 0x0084, "xs", "?" }, { 0x0019, 0x0085, "xs", "?" }, { 0x0019, 0x0086, "xs", "?" }, { 0x0019, 0x0087, "xs", "?" }, { 0x0019, 0x0088, "xs", "?" }, { 0x0019, 0x008a, "xs", "?" }, { 0x0019, 0x008b, "SS", "Actual Receive Gain Digital" }, { 0x0019, 0x008c, "US", "?" }, { 0x0019, 0x008d, "DS", "Delay After Trigger" }, { 0x0019, 0x008e, "US", "?" }, { 0x0019, 0x008f, "SS", "Swap Phase Frequency" }, { 0x0019, 0x0090, "xs", "?" }, { 0x0019, 0x0091, "xs", "?" }, { 0x0019, 0x0092, "xs", "?" }, { 0x0019, 0x0093, "xs", "?" }, { 0x0019, 0x0094, "xs", "?" }, { 0x0019, 0x0095, "SS", "Analog Receiver Gain" }, { 0x0019, 0x0096, "xs", "?" }, { 0x0019, 0x0097, "xs", "?" }, { 0x0019, 0x0098, "xs", "?" }, { 0x0019, 0x0099, "US", "?" }, { 0x0019, 0x009a, "US", "?" }, { 0x0019, 0x009b, "SS", "Pulse Sequence Mode" }, { 0x0019, 0x009c, "xs", "?" }, { 0x0019, 0x009d, "DT", "Pulse Sequence Date" }, { 0x0019, 0x009e, "xs", "?" }, { 0x0019, 0x009f, "xs", "?" }, { 0x0019, 0x00a0, "xs", "?" }, { 0x0019, 0x00a1, "xs", "?" }, { 0x0019, 0x00a2, "xs", "?" }, { 0x0019, 0x00a3, "xs", "?" }, { 0x0019, 0x00a4, "xs", "?" }, { 0x0019, 0x00a5, "xs", "?" }, { 0x0019, 0x00a6, "xs", "?" }, { 0x0019, 0x00a7, "xs", "?" }, { 0x0019, 0x00a8, "xs", "?" }, { 0x0019, 0x00a9, "xs", "?" }, { 0x0019, 0x00aa, "xs", "?" 
}, { 0x0019, 0x00ab, "xs", "?" }, { 0x0019, 0x00ac, "xs", "?" }, { 0x0019, 0x00ad, "xs", "?" }, { 0x0019, 0x00ae, "xs", "?" }, { 0x0019, 0x00af, "xs", "?" }, { 0x0019, 0x00b0, "xs", "?" }, { 0x0019, 0x00b1, "xs", "?" }, { 0x0019, 0x00b2, "xs", "?" }, { 0x0019, 0x00b3, "xs", "?" }, { 0x0019, 0x00b4, "xs", "?" }, { 0x0019, 0x00b5, "xs", "?" }, { 0x0019, 0x00b6, "DS", "User Data" }, { 0x0019, 0x00b7, "DS", "User Data" }, { 0x0019, 0x00b8, "DS", "User Data" }, { 0x0019, 0x00b9, "DS", "User Data" }, { 0x0019, 0x00ba, "DS", "User Data" }, { 0x0019, 0x00bb, "DS", "User Data" }, { 0x0019, 0x00bc, "DS", "User Data" }, { 0x0019, 0x00bd, "DS", "User Data" }, { 0x0019, 0x00be, "DS", "Projection Angle" }, { 0x0019, 0x00c0, "xs", "?" }, { 0x0019, 0x00c1, "xs", "?" }, { 0x0019, 0x00c2, "xs", "?" }, { 0x0019, 0x00c3, "xs", "?" }, { 0x0019, 0x00c4, "xs", "?" }, { 0x0019, 0x00c5, "xs", "?" }, { 0x0019, 0x00c6, "SS", "SAT Location H" }, { 0x0019, 0x00c7, "SS", "SAT Location F" }, { 0x0019, 0x00c8, "SS", "SAT Thickness R L" }, { 0x0019, 0x00c9, "SS", "SAT Thickness A P" }, { 0x0019, 0x00ca, "SS", "SAT Thickness H F" }, { 0x0019, 0x00cb, "xs", "?" }, { 0x0019, 0x00cc, "xs", "?" }, { 0x0019, 0x00cd, "SS", "Thickness Disclaimer" }, { 0x0019, 0x00ce, "SS", "Prescan Type" }, { 0x0019, 0x00cf, "SS", "Prescan Status" }, { 0x0019, 0x00d0, "SH", "Raw Data Type" }, { 0x0019, 0x00d1, "DS", "Flow Sensitivity" }, { 0x0019, 0x00d2, "xs", "?" }, { 0x0019, 0x00d3, "xs", "?" }, { 0x0019, 0x00d4, "xs", "?" }, { 0x0019, 0x00d5, "xs", "?" }, { 0x0019, 0x00d6, "xs", "?" }, { 0x0019, 0x00d7, "xs", "?" }, { 0x0019, 0x00d8, "xs", "?" }, { 0x0019, 0x00d9, "xs", "?" }, { 0x0019, 0x00da, "xs", "?" 
}, { 0x0019, 0x00db, "DS", "Back Projector Coefficient" }, { 0x0019, 0x00dc, "SS", "Primary Speed Correction Used" }, { 0x0019, 0x00dd, "SS", "Overrange Correction Used" }, { 0x0019, 0x00de, "DS", "Dynamic Z Alpha Value" }, { 0x0019, 0x00df, "DS", "User Data" }, { 0x0019, 0x00e0, "DS", "User Data" }, { 0x0019, 0x00e1, "xs", "?" }, { 0x0019, 0x00e2, "xs", "?" }, { 0x0019, 0x00e3, "xs", "?" }, { 0x0019, 0x00e4, "LT", "?" }, { 0x0019, 0x00e5, "IS", "?" }, { 0x0019, 0x00e6, "US", "?" }, { 0x0019, 0x00e8, "DS", "?" }, { 0x0019, 0x00e9, "DS", "?" }, { 0x0019, 0x00eb, "DS", "?" }, { 0x0019, 0x00ec, "US", "?" }, { 0x0019, 0x00f0, "xs", "?" }, { 0x0019, 0x00f1, "xs", "?" }, { 0x0019, 0x00f2, "xs", "?" }, { 0x0019, 0x00f3, "xs", "?" }, { 0x0019, 0x00f4, "LT", "?" }, { 0x0019, 0x00f9, "DS", "Transmission Gain" }, { 0x0019, 0x1015, "UN", "?" }, { 0x0020, 0x0000, "UL", "Relationship Group Length" }, { 0x0020, 0x000d, "UI", "Study Instance UID" }, { 0x0020, 0x000e, "UI", "Series Instance UID" }, { 0x0020, 0x0010, "SH", "Study ID" }, { 0x0020, 0x0011, "IS", "Series Number" }, { 0x0020, 0x0012, "IS", "Acquisition Number" }, { 0x0020, 0x0013, "IS", "Instance (formerly Image) Number" }, { 0x0020, 0x0014, "IS", "Isotope Number" }, { 0x0020, 0x0015, "IS", "Phase Number" }, { 0x0020, 0x0016, "IS", "Interval Number" }, { 0x0020, 0x0017, "IS", "Time Slot Number" }, { 0x0020, 0x0018, "IS", "Angle Number" }, { 0x0020, 0x0020, "CS", "Patient Orientation" }, { 0x0020, 0x0022, "IS", "Overlay Number" }, { 0x0020, 0x0024, "IS", "Curve Number" }, { 0x0020, 0x0026, "IS", "LUT Number" }, { 0x0020, 0x0030, "DS", "Image Position" }, { 0x0020, 0x0032, "DS", "Image Position (Patient)" }, { 0x0020, 0x0035, "DS", "Image Orientation" }, { 0x0020, 0x0037, "DS", "Image Orientation (Patient)" }, { 0x0020, 0x0050, "DS", "Location" }, { 0x0020, 0x0052, "UI", "Frame of Reference UID" }, { 0x0020, 0x0060, "CS", "Laterality" }, { 0x0020, 0x0062, "CS", "Image Laterality" }, { 0x0020, 0x0070, "LT", "Image Geometry 
Type" }, { 0x0020, 0x0080, "LO", "Masking Image" }, { 0x0020, 0x0100, "IS", "Temporal Position Identifier" }, { 0x0020, 0x0105, "IS", "Number of Temporal Positions" }, { 0x0020, 0x0110, "DS", "Temporal Resolution" }, { 0x0020, 0x1000, "IS", "Series in Study" }, { 0x0020, 0x1001, "DS", "Acquisitions in Series" }, { 0x0020, 0x1002, "IS", "Images in Acquisition" }, { 0x0020, 0x1003, "IS", "Images in Series" }, { 0x0020, 0x1004, "IS", "Acquisitions in Study" }, { 0x0020, 0x1005, "IS", "Images in Study" }, { 0x0020, 0x1020, "LO", "Reference" }, { 0x0020, 0x1040, "LO", "Position Reference Indicator" }, { 0x0020, 0x1041, "DS", "Slice Location" }, { 0x0020, 0x1070, "IS", "Other Study Numbers" }, { 0x0020, 0x1200, "IS", "Number of Patient Related Studies" }, { 0x0020, 0x1202, "IS", "Number of Patient Related Series" }, { 0x0020, 0x1204, "IS", "Number of Patient Related Images" }, { 0x0020, 0x1206, "IS", "Number of Study Related Series" }, { 0x0020, 0x1208, "IS", "Number of Study Related Series" }, { 0x0020, 0x3100, "LO", "Source Image IDs" }, { 0x0020, 0x3401, "LO", "Modifying Device ID" }, { 0x0020, 0x3402, "LO", "Modified Image ID" }, { 0x0020, 0x3403, "xs", "Modified Image Date" }, { 0x0020, 0x3404, "LO", "Modifying Device Manufacturer" }, { 0x0020, 0x3405, "xs", "Modified Image Time" }, { 0x0020, 0x3406, "xs", "Modified Image Description" }, { 0x0020, 0x4000, "LT", "Image Comments" }, { 0x0020, 0x5000, "AT", "Original Image Identification" }, { 0x0020, 0x5002, "LO", "Original Image Identification Nomenclature" }, { 0x0021, 0x0000, "xs", "?" }, { 0x0021, 0x0001, "xs", "?" }, { 0x0021, 0x0002, "xs", "?" }, { 0x0021, 0x0003, "xs", "?" }, { 0x0021, 0x0004, "DS", "VOI Position" }, { 0x0021, 0x0005, "xs", "?" }, { 0x0021, 0x0006, "IS", "CSI Matrix Size Original" }, { 0x0021, 0x0007, "xs", "?" }, { 0x0021, 0x0008, "DS", "Spatial Grid Shift" }, { 0x0021, 0x0009, "DS", "Signal Limits Minimum" }, { 0x0021, 0x0010, "xs", "?" }, { 0x0021, 0x0011, "xs", "?" 
}, { 0x0021, 0x0012, "xs", "?" }, { 0x0021, 0x0013, "xs", "?" }, { 0x0021, 0x0014, "xs", "?" }, { 0x0021, 0x0015, "xs", "?" }, { 0x0021, 0x0016, "xs", "?" }, { 0x0021, 0x0017, "DS", "EPI Operation Mode Flag" }, { 0x0021, 0x0018, "xs", "?" }, { 0x0021, 0x0019, "xs", "?" }, { 0x0021, 0x0020, "xs", "?" }, { 0x0021, 0x0021, "xs", "?" }, { 0x0021, 0x0022, "xs", "?" }, { 0x0021, 0x0024, "xs", "?" }, { 0x0021, 0x0025, "US", "?" }, { 0x0021, 0x0026, "IS", "Image Pixel Offset" }, { 0x0021, 0x0030, "xs", "?" }, { 0x0021, 0x0031, "xs", "?" }, { 0x0021, 0x0032, "xs", "?" }, { 0x0021, 0x0034, "xs", "?" }, { 0x0021, 0x0035, "SS", "Series From Which Prescribed" }, { 0x0021, 0x0036, "xs", "?" }, { 0x0021, 0x0037, "SS", "Screen Format" }, { 0x0021, 0x0039, "DS", "Slab Thickness" }, { 0x0021, 0x0040, "xs", "?" }, { 0x0021, 0x0041, "xs", "?" }, { 0x0021, 0x0042, "xs", "?" }, { 0x0021, 0x0043, "xs", "?" }, { 0x0021, 0x0044, "xs", "?" }, { 0x0021, 0x0045, "xs", "?" }, { 0x0021, 0x0046, "xs", "?" }, { 0x0021, 0x0047, "xs", "?" }, { 0x0021, 0x0048, "xs", "?" }, { 0x0021, 0x0049, "xs", "?" }, { 0x0021, 0x004a, "xs", "?" }, { 0x0021, 0x004e, "US", "?" }, { 0x0021, 0x004f, "xs", "?" }, { 0x0021, 0x0050, "xs", "?" }, { 0x0021, 0x0051, "xs", "?" }, { 0x0021, 0x0052, "xs", "?" }, { 0x0021, 0x0053, "xs", "?" }, { 0x0021, 0x0054, "xs", "?" }, { 0x0021, 0x0055, "xs", "?" }, { 0x0021, 0x0056, "xs", "?" }, { 0x0021, 0x0057, "xs", "?" }, { 0x0021, 0x0058, "xs", "?" }, { 0x0021, 0x0059, "xs", "?" }, { 0x0021, 0x005a, "SL", "Integer Slop" }, { 0x0021, 0x005b, "DS", "Float Slop" }, { 0x0021, 0x005c, "DS", "Float Slop" }, { 0x0021, 0x005d, "DS", "Float Slop" }, { 0x0021, 0x005e, "DS", "Float Slop" }, { 0x0021, 0x005f, "DS", "Float Slop" }, { 0x0021, 0x0060, "xs", "?" 
}, { 0x0021, 0x0061, "DS", "Image Normal" }, { 0x0021, 0x0062, "IS", "Reference Type Code" }, { 0x0021, 0x0063, "DS", "Image Distance" }, { 0x0021, 0x0065, "US", "Image Positioning History Mask" }, { 0x0021, 0x006a, "DS", "Image Row" }, { 0x0021, 0x006b, "DS", "Image Column" }, { 0x0021, 0x0070, "xs", "?" }, { 0x0021, 0x0071, "xs", "?" }, { 0x0021, 0x0072, "xs", "?" }, { 0x0021, 0x0073, "DS", "Second Repetition Time" }, { 0x0021, 0x0075, "DS", "Light Brightness" }, { 0x0021, 0x0076, "DS", "Light Contrast" }, { 0x0021, 0x007a, "IS", "Overlay Threshold" }, { 0x0021, 0x007b, "IS", "Surface Threshold" }, { 0x0021, 0x007c, "IS", "Grey Scale Threshold" }, { 0x0021, 0x0080, "xs", "?" }, { 0x0021, 0x0081, "DS", "Auto Window Level Alpha" }, { 0x0021, 0x0082, "xs", "?" }, { 0x0021, 0x0083, "DS", "Auto Window Level Window" }, { 0x0021, 0x0084, "DS", "Auto Window Level Level" }, { 0x0021, 0x0090, "xs", "?" }, { 0x0021, 0x0091, "xs", "?" }, { 0x0021, 0x0092, "xs", "?" }, { 0x0021, 0x0093, "xs", "?" }, { 0x0021, 0x0094, "DS", "EPI Change Value of X Component" }, { 0x0021, 0x0095, "DS", "EPI Change Value of Y Component" }, { 0x0021, 0x0096, "DS", "EPI Change Value of Z Component" }, { 0x0021, 0x00a0, "xs", "?" }, { 0x0021, 0x00a1, "DS", "?" }, { 0x0021, 0x00a2, "xs", "?" }, { 0x0021, 0x00a3, "LT", "?" }, { 0x0021, 0x00a4, "LT", "?" }, { 0x0021, 0x00a7, "LT", "?" }, { 0x0021, 0x00b0, "IS", "?" }, { 0x0021, 0x00c0, "IS", "?" }, { 0x0023, 0x0000, "xs", "?" }, { 0x0023, 0x0001, "SL", "Number Of Series In Study" }, { 0x0023, 0x0002, "SL", "Number Of Unarchived Series" }, { 0x0023, 0x0010, "xs", "?" }, { 0x0023, 0x0020, "xs", "?" }, { 0x0023, 0x0030, "xs", "?" }, { 0x0023, 0x0040, "xs", "?" }, { 0x0023, 0x0050, "xs", "?" }, { 0x0023, 0x0060, "xs", "?" }, { 0x0023, 0x0070, "xs", "?" }, { 0x0023, 0x0074, "SL", "Number Of Updates To Info" }, { 0x0023, 0x007d, "SS", "Indicates If Study Has Complete Info" }, { 0x0023, 0x0080, "xs", "?" }, { 0x0023, 0x0090, "xs", "?" 
}, { 0x0023, 0x00ff, "US", "?" }, { 0x0025, 0x0000, "UL", "Group Length" }, { 0x0025, 0x0006, "SS", "Last Pulse Sequence Used" }, { 0x0025, 0x0007, "SL", "Images In Series" }, { 0x0025, 0x0010, "SS", "Landmark Counter" }, { 0x0025, 0x0011, "SS", "Number Of Acquisitions" }, { 0x0025, 0x0014, "SL", "Indicates Number Of Updates To Info" }, { 0x0025, 0x0017, "SL", "Series Complete Flag" }, { 0x0025, 0x0018, "SL", "Number Of Images Archived" }, { 0x0025, 0x0019, "SL", "Last Image Number Used" }, { 0x0025, 0x001a, "SH", "Primary Receiver Suite And Host" }, { 0x0027, 0x0000, "US", "?" }, { 0x0027, 0x0006, "SL", "Image Archive Flag" }, { 0x0027, 0x0010, "SS", "Scout Type" }, { 0x0027, 0x0011, "UN", "?" }, { 0x0027, 0x0012, "IS", "?" }, { 0x0027, 0x0013, "IS", "?" }, { 0x0027, 0x0014, "IS", "?" }, { 0x0027, 0x0015, "IS", "?" }, { 0x0027, 0x0016, "LT", "?" }, { 0x0027, 0x001c, "SL", "Vma Mamp" }, { 0x0027, 0x001d, "SS", "Vma Phase" }, { 0x0027, 0x001e, "SL", "Vma Mod" }, { 0x0027, 0x001f, "SL", "Vma Clip" }, { 0x0027, 0x0020, "SS", "Smart Scan On Off Flag" }, { 0x0027, 0x0030, "SH", "Foreign Image Revision" }, { 0x0027, 0x0031, "SS", "Imaging Mode" }, { 0x0027, 0x0032, "SS", "Pulse Sequence" }, { 0x0027, 0x0033, "SL", "Imaging Options" }, { 0x0027, 0x0035, "SS", "Plane Type" }, { 0x0027, 0x0036, "SL", "Oblique Plane" }, { 0x0027, 0x0040, "SH", "RAS Letter Of Image Location" }, { 0x0027, 0x0041, "FL", "Image Location" }, { 0x0027, 0x0042, "FL", "Center R Coord Of Plane Image" }, { 0x0027, 0x0043, "FL", "Center A Coord Of Plane Image" }, { 0x0027, 0x0044, "FL", "Center S Coord Of Plane Image" }, { 0x0027, 0x0045, "FL", "Normal R Coord" }, { 0x0027, 0x0046, "FL", "Normal A Coord" }, { 0x0027, 0x0047, "FL", "Normal S Coord" }, { 0x0027, 0x0048, "FL", "R Coord Of Top Right Corner" }, { 0x0027, 0x0049, "FL", "A Coord Of Top Right Corner" }, { 0x0027, 0x004a, "FL", "S Coord Of Top Right Corner" }, { 0x0027, 0x004b, "FL", "R Coord Of Bottom Right Corner" }, { 0x0027, 0x004c, "FL", 
"A Coord Of Bottom Right Corner" }, { 0x0027, 0x004d, "FL", "S Coord Of Bottom Right Corner" }, { 0x0027, 0x0050, "FL", "Table Start Location" }, { 0x0027, 0x0051, "FL", "Table End Location" }, { 0x0027, 0x0052, "SH", "RAS Letter For Side Of Image" }, { 0x0027, 0x0053, "SH", "RAS Letter For Anterior Posterior" }, { 0x0027, 0x0054, "SH", "RAS Letter For Scout Start Loc" }, { 0x0027, 0x0055, "SH", "RAS Letter For Scout End Loc" }, { 0x0027, 0x0060, "FL", "Image Dimension X" }, { 0x0027, 0x0061, "FL", "Image Dimension Y" }, { 0x0027, 0x0062, "FL", "Number Of Excitations" }, { 0x0028, 0x0000, "UL", "Image Presentation Group Length" }, { 0x0028, 0x0002, "US", "Samples per Pixel" }, { 0x0028, 0x0004, "CS", "Photometric Interpretation" }, { 0x0028, 0x0005, "US", "Image Dimensions" }, { 0x0028, 0x0006, "US", "Planar Configuration" }, { 0x0028, 0x0008, "IS", "Number of Frames" }, { 0x0028, 0x0009, "AT", "Frame Increment Pointer" }, { 0x0028, 0x0010, "US", "Rows" }, { 0x0028, 0x0011, "US", "Columns" }, { 0x0028, 0x0012, "US", "Planes" }, { 0x0028, 0x0014, "US", "Ultrasound Color Data Present" }, { 0x0028, 0x0030, "DS", "Pixel Spacing" }, { 0x0028, 0x0031, "DS", "Zoom Factor" }, { 0x0028, 0x0032, "DS", "Zoom Center" }, { 0x0028, 0x0034, "IS", "Pixel Aspect Ratio" }, { 0x0028, 0x0040, "LO", "Image Format" }, { 0x0028, 0x0050, "LT", "Manipulated Image" }, { 0x0028, 0x0051, "CS", "Corrected Image" }, { 0x0028, 0x005f, "LO", "Compression Recognition Code" }, { 0x0028, 0x0060, "LO", "Compression Code" }, { 0x0028, 0x0061, "SH", "Compression Originator" }, { 0x0028, 0x0062, "SH", "Compression Label" }, { 0x0028, 0x0063, "SH", "Compression Description" }, { 0x0028, 0x0065, "LO", "Compression Sequence" }, { 0x0028, 0x0066, "AT", "Compression Step Pointers" }, { 0x0028, 0x0068, "US", "Repeat Interval" }, { 0x0028, 0x0069, "US", "Bits Grouped" }, { 0x0028, 0x0070, "US", "Perimeter Table" }, { 0x0028, 0x0071, "xs", "Perimeter Value" }, { 0x0028, 0x0080, "US", "Predictor Rows" }, { 
0x0028, 0x0081, "US", "Predictor Columns" }, { 0x0028, 0x0082, "US", "Predictor Constants" }, { 0x0028, 0x0090, "LO", "Blocked Pixels" }, { 0x0028, 0x0091, "US", "Block Rows" }, { 0x0028, 0x0092, "US", "Block Columns" }, { 0x0028, 0x0093, "US", "Row Overlap" }, { 0x0028, 0x0094, "US", "Column Overlap" }, { 0x0028, 0x0100, "US", "Bits Allocated" }, { 0x0028, 0x0101, "US", "Bits Stored" }, { 0x0028, 0x0102, "US", "High Bit" }, { 0x0028, 0x0103, "US", "Pixel Representation" }, { 0x0028, 0x0104, "xs", "Smallest Valid Pixel Value" }, { 0x0028, 0x0105, "xs", "Largest Valid Pixel Value" }, { 0x0028, 0x0106, "xs", "Smallest Image Pixel Value" }, { 0x0028, 0x0107, "xs", "Largest Image Pixel Value" }, { 0x0028, 0x0108, "xs", "Smallest Pixel Value in Series" }, { 0x0028, 0x0109, "xs", "Largest Pixel Value in Series" }, { 0x0028, 0x0110, "xs", "Smallest Pixel Value in Plane" }, { 0x0028, 0x0111, "xs", "Largest Pixel Value in Plane" }, { 0x0028, 0x0120, "xs", "Pixel Padding Value" }, { 0x0028, 0x0200, "xs", "Image Location" }, { 0x0028, 0x0300, "CS", "Quality Control Image" }, { 0x0028, 0x0301, "CS", "Burned In Annotation" }, { 0x0028, 0x0400, "xs", "?" }, { 0x0028, 0x0401, "xs", "?" }, { 0x0028, 0x0402, "xs", "?" }, { 0x0028, 0x0403, "xs", "?" 
}, { 0x0028, 0x0404, "AT", "Details of Coefficients" }, { 0x0028, 0x0700, "LO", "DCT Label" }, { 0x0028, 0x0701, "LO", "Data Block Description" }, { 0x0028, 0x0702, "AT", "Data Block" }, { 0x0028, 0x0710, "US", "Normalization Factor Format" }, { 0x0028, 0x0720, "US", "Zonal Map Number Format" }, { 0x0028, 0x0721, "AT", "Zonal Map Location" }, { 0x0028, 0x0722, "US", "Zonal Map Format" }, { 0x0028, 0x0730, "US", "Adaptive Map Format" }, { 0x0028, 0x0740, "US", "Code Number Format" }, { 0x0028, 0x0800, "LO", "Code Label" }, { 0x0028, 0x0802, "US", "Number of Tables" }, { 0x0028, 0x0803, "AT", "Code Table Location" }, { 0x0028, 0x0804, "US", "Bits For Code Word" }, { 0x0028, 0x0808, "AT", "Image Data Location" }, { 0x0028, 0x1040, "CS", "Pixel Intensity Relationship" }, { 0x0028, 0x1041, "SS", "Pixel Intensity Relationship Sign" }, { 0x0028, 0x1050, "DS", "Window Center" }, { 0x0028, 0x1051, "DS", "Window Width" }, { 0x0028, 0x1052, "DS", "Rescale Intercept" }, { 0x0028, 0x1053, "DS", "Rescale Slope" }, { 0x0028, 0x1054, "LO", "Rescale Type" }, { 0x0028, 0x1055, "LO", "Window Center & Width Explanation" }, { 0x0028, 0x1080, "LO", "Gray Scale" }, { 0x0028, 0x1090, "CS", "Recommended Viewing Mode" }, { 0x0028, 0x1100, "xs", "Gray Lookup Table Descriptor" }, { 0x0028, 0x1101, "xs", "Red Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1102, "xs", "Green Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1103, "xs", "Blue Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1111, "OW", "Large Red Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1112, "OW", "Large Green Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1113, "OW", "Large Blue Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1199, "UI", "Palette Color Lookup Table UID" }, { 0x0028, 0x1200, "xs", "Gray Lookup Table Data" }, { 0x0028, 0x1201, "OW", "Red Palette Color Lookup Table Data" }, { 0x0028, 0x1202, "OW", "Green Palette Color Lookup Table Data" }, { 0x0028, 0x1203, "OW", "Blue 
Palette Color Lookup Table Data" }, { 0x0028, 0x1211, "OW", "Large Red Palette Color Lookup Table Data" }, { 0x0028, 0x1212, "OW", "Large Green Palette Color Lookup Table Data" }, { 0x0028, 0x1213, "OW", "Large Blue Palette Color Lookup Table Data" }, { 0x0028, 0x1214, "UI", "Large Palette Color Lookup Table UID" }, { 0x0028, 0x1221, "OW", "Segmented Red Palette Color Lookup Table Data" }, { 0x0028, 0x1222, "OW", "Segmented Green Palette Color Lookup Table Data" }, { 0x0028, 0x1223, "OW", "Segmented Blue Palette Color Lookup Table Data" }, { 0x0028, 0x1300, "CS", "Implant Present" }, { 0x0028, 0x2110, "CS", "Lossy Image Compression" }, { 0x0028, 0x2112, "DS", "Lossy Image Compression Ratio" }, { 0x0028, 0x3000, "SQ", "Modality LUT Sequence" }, { 0x0028, 0x3002, "US", "LUT Descriptor" }, { 0x0028, 0x3003, "LO", "LUT Explanation" }, { 0x0028, 0x3004, "LO", "Modality LUT Type" }, { 0x0028, 0x3006, "US", "LUT Data" }, { 0x0028, 0x3010, "xs", "VOI LUT Sequence" }, { 0x0028, 0x4000, "LT", "Image Presentation Comments" }, { 0x0028, 0x5000, "SQ", "Biplane Acquisition Sequence" }, { 0x0028, 0x6010, "US", "Representative Frame Number" }, { 0x0028, 0x6020, "US", "Frame Numbers of Interest" }, { 0x0028, 0x6022, "LO", "Frame of Interest Description" }, { 0x0028, 0x6030, "US", "Mask Pointer" }, { 0x0028, 0x6040, "US", "R Wave Pointer" }, { 0x0028, 0x6100, "SQ", "Mask Subtraction Sequence" }, { 0x0028, 0x6101, "CS", "Mask Operation" }, { 0x0028, 0x6102, "US", "Applicable Frame Range" }, { 0x0028, 0x6110, "US", "Mask Frame Numbers" }, { 0x0028, 0x6112, "US", "Contrast Frame Averaging" }, { 0x0028, 0x6114, "FL", "Mask Sub-Pixel Shift" }, { 0x0028, 0x6120, "SS", "TID Offset" }, { 0x0028, 0x6190, "ST", "Mask Operation Explanation" }, { 0x0029, 0x0000, "xs", "?" }, { 0x0029, 0x0001, "xs", "?" }, { 0x0029, 0x0002, "xs", "?" }, { 0x0029, 0x0003, "xs", "?" }, { 0x0029, 0x0004, "xs", "?" }, { 0x0029, 0x0005, "xs", "?" }, { 0x0029, 0x0006, "xs", "?" 
}, { 0x0029, 0x0007, "SL", "Lower Range Of Pixels" }, { 0x0029, 0x0008, "SH", "Lower Range Of Pixels" }, { 0x0029, 0x0009, "SH", "Lower Range Of Pixels" }, { 0x0029, 0x000a, "SS", "Lower Range Of Pixels" }, { 0x0029, 0x000c, "xs", "?" }, { 0x0029, 0x000e, "CS", "Zoom Enable Status" }, { 0x0029, 0x000f, "CS", "Zoom Select Status" }, { 0x0029, 0x0010, "xs", "?" }, { 0x0029, 0x0011, "xs", "?" }, { 0x0029, 0x0013, "LT", "?" }, { 0x0029, 0x0015, "xs", "?" }, { 0x0029, 0x0016, "SL", "Lower Range Of Pixels" }, { 0x0029, 0x0017, "SL", "Lower Range Of Pixels" }, { 0x0029, 0x0018, "SL", "Upper Range Of Pixels" }, { 0x0029, 0x001a, "SL", "Length Of Total Info In Bytes" }, { 0x0029, 0x001e, "xs", "?" }, { 0x0029, 0x001f, "xs", "?" }, { 0x0029, 0x0020, "xs", "?" }, { 0x0029, 0x0022, "IS", "Pixel Quality Value" }, { 0x0029, 0x0025, "LT", "Processed Pixel Data Quality" }, { 0x0029, 0x0026, "SS", "Version Of Info Structure" }, { 0x0029, 0x0030, "xs", "?" }, { 0x0029, 0x0031, "xs", "?" }, { 0x0029, 0x0032, "xs", "?" }, { 0x0029, 0x0033, "xs", "?" }, { 0x0029, 0x0034, "xs", "?" }, { 0x0029, 0x0035, "SL", "Advantage Comp Underflow" }, { 0x0029, 0x0038, "US", "?" }, { 0x0029, 0x0040, "xs", "?" }, { 0x0029, 0x0041, "DS", "Magnifying Glass Rectangle" }, { 0x0029, 0x0043, "DS", "Magnifying Glass Factor" }, { 0x0029, 0x0044, "US", "Magnifying Glass Function" }, { 0x0029, 0x004e, "CS", "Magnifying Glass Enable Status" }, { 0x0029, 0x004f, "CS", "Magnifying Glass Select Status" }, { 0x0029, 0x0050, "xs", "?" }, { 0x0029, 0x0051, "LT", "Exposure Code" }, { 0x0029, 0x0052, "LT", "Sort Code" }, { 0x0029, 0x0053, "LT", "?" }, { 0x0029, 0x0060, "xs", "?" }, { 0x0029, 0x0061, "xs", "?" }, { 0x0029, 0x0067, "LT", "?" }, { 0x0029, 0x0070, "xs", "?" }, { 0x0029, 0x0071, "xs", "?" }, { 0x0029, 0x0072, "xs", "?" 
}, { 0x0029, 0x0077, "CS", "Window Select Status" }, { 0x0029, 0x0078, "LT", "ECG Display Printing ID" }, { 0x0029, 0x0079, "CS", "ECG Display Printing" }, { 0x0029, 0x007e, "CS", "ECG Display Printing Enable Status" }, { 0x0029, 0x007f, "CS", "ECG Display Printing Select Status" }, { 0x0029, 0x0080, "xs", "?" }, { 0x0029, 0x0081, "xs", "?" }, { 0x0029, 0x0082, "IS", "View Zoom" }, { 0x0029, 0x0083, "IS", "View Transform" }, { 0x0029, 0x008e, "CS", "Physiological Display Enable Status" }, { 0x0029, 0x008f, "CS", "Physiological Display Select Status" }, { 0x0029, 0x0090, "IS", "?" }, { 0x0029, 0x0099, "LT", "Shutter Type" }, { 0x0029, 0x00a0, "US", "Rows of Rectangular Shutter" }, { 0x0029, 0x00a1, "US", "Columns of Rectangular Shutter" }, { 0x0029, 0x00a2, "US", "Origin of Rectangular Shutter" }, { 0x0029, 0x00b0, "US", "Radius of Circular Shutter" }, { 0x0029, 0x00b2, "US", "Origin of Circular Shutter" }, { 0x0029, 0x00c0, "LT", "Functional Shutter ID" }, { 0x0029, 0x00c1, "xs", "?" }, { 0x0029, 0x00c3, "IS", "Scan Resolution" }, { 0x0029, 0x00c4, "IS", "Field of View" }, { 0x0029, 0x00c5, "LT", "Field Of Shutter Rectangle" }, { 0x0029, 0x00ce, "CS", "Shutter Enable Status" }, { 0x0029, 0x00cf, "CS", "Shutter Select Status" }, { 0x0029, 0x00d0, "IS", "?" }, { 0x0029, 0x00d1, "IS", "?" 
}, { 0x0029, 0x00d5, "LT", "Slice Thickness" }, { 0x0031, 0x0010, "LT", "Request UID" }, { 0x0031, 0x0012, "LT", "Examination Reason" }, { 0x0031, 0x0030, "DA", "Requested Date" }, { 0x0031, 0x0032, "TM", "Worklist Request Start Time" }, { 0x0031, 0x0033, "TM", "Worklist Request End Time" }, { 0x0031, 0x0045, "LT", "Requesting Physician" }, { 0x0031, 0x004a, "TM", "Requested Time" }, { 0x0031, 0x0050, "LT", "Requested Physician" }, { 0x0031, 0x0080, "LT", "Requested Location" }, { 0x0032, 0x0000, "UL", "Study Group Length" }, { 0x0032, 0x000a, "CS", "Study Status ID" }, { 0x0032, 0x000c, "CS", "Study Priority ID" }, { 0x0032, 0x0012, "LO", "Study ID Issuer" }, { 0x0032, 0x0032, "DA", "Study Verified Date" }, { 0x0032, 0x0033, "TM", "Study Verified Time" }, { 0x0032, 0x0034, "DA", "Study Read Date" }, { 0x0032, 0x0035, "TM", "Study Read Time" }, { 0x0032, 0x1000, "DA", "Scheduled Study Start Date" }, { 0x0032, 0x1001, "TM", "Scheduled Study Start Time" }, { 0x0032, 0x1010, "DA", "Scheduled Study Stop Date" }, { 0x0032, 0x1011, "TM", "Scheduled Study Stop Time" }, { 0x0032, 0x1020, "LO", "Scheduled Study Location" }, { 0x0032, 0x1021, "AE", "Scheduled Study Location AE Title(s)" }, { 0x0032, 0x1030, "LO", "Reason for Study" }, { 0x0032, 0x1032, "PN", "Requesting Physician" }, { 0x0032, 0x1033, "LO", "Requesting Service" }, { 0x0032, 0x1040, "DA", "Study Arrival Date" }, { 0x0032, 0x1041, "TM", "Study Arrival Time" }, { 0x0032, 0x1050, "DA", "Study Completion Date" }, { 0x0032, 0x1051, "TM", "Study Completion Time" }, { 0x0032, 0x1055, "CS", "Study Component Status ID" }, { 0x0032, 0x1060, "LO", "Requested Procedure Description" }, { 0x0032, 0x1064, "SQ", "Requested Procedure Code Sequence" }, { 0x0032, 0x1070, "LO", "Requested Contrast Agent" }, { 0x0032, 0x4000, "LT", "Study Comments" }, { 0x0033, 0x0001, "UN", "?" }, { 0x0033, 0x0002, "UN", "?" }, { 0x0033, 0x0005, "UN", "?" }, { 0x0033, 0x0006, "UN", "?" 
}, { 0x0033, 0x0010, "LT", "Patient Study UID" }, { 0x0037, 0x0010, "LO", "ReferringDepartment" }, { 0x0037, 0x0020, "US", "ScreenNumber" }, { 0x0037, 0x0040, "SH", "LeftOrientation" }, { 0x0037, 0x0042, "SH", "RightOrientation" }, { 0x0037, 0x0050, "CS", "Inversion" }, { 0x0037, 0x0060, "US", "DSA" }, { 0x0038, 0x0000, "UL", "Visit Group Length" }, { 0x0038, 0x0004, "SQ", "Referenced Patient Alias Sequence" }, { 0x0038, 0x0008, "CS", "Visit Status ID" }, { 0x0038, 0x0010, "LO", "Admission ID" }, { 0x0038, 0x0011, "LO", "Issuer of Admission ID" }, { 0x0038, 0x0016, "LO", "Route of Admissions" }, { 0x0038, 0x001a, "DA", "Scheduled Admission Date" }, { 0x0038, 0x001b, "TM", "Scheduled Admission Time" }, { 0x0038, 0x001c, "DA", "Scheduled Discharge Date" }, { 0x0038, 0x001d, "TM", "Scheduled Discharge Time" }, { 0x0038, 0x001e, "LO", "Scheduled Patient Institution Residence" }, { 0x0038, 0x0020, "DA", "Admitting Date" }, { 0x0038, 0x0021, "TM", "Admitting Time" }, { 0x0038, 0x0030, "DA", "Discharge Date" }, { 0x0038, 0x0032, "TM", "Discharge Time" }, { 0x0038, 0x0040, "LO", "Discharge Diagnosis Description" }, { 0x0038, 0x0044, "SQ", "Discharge Diagnosis Code Sequence" }, { 0x0038, 0x0050, "LO", "Special Needs" }, { 0x0038, 0x0300, "LO", "Current Patient Location" }, { 0x0038, 0x0400, "LO", "Patient's Institution Residence" }, { 0x0038, 0x0500, "LO", "Patient State" }, { 0x0038, 0x4000, "LT", "Visit Comments" }, { 0x0039, 0x0080, "IS", "Private Entity Number" }, { 0x0039, 0x0085, "DA", "Private Entity Date" }, { 0x0039, 0x0090, "TM", "Private Entity Time" }, { 0x0039, 0x0095, "LO", "Private Entity Launch Command" }, { 0x0039, 0x00aa, "CS", "Private Entity Type" }, { 0x003a, 0x0002, "SQ", "Waveform Sequence" }, { 0x003a, 0x0005, "US", "Waveform Number of Channels" }, { 0x003a, 0x0010, "UL", "Waveform Number of Samples" }, { 0x003a, 0x001a, "DS", "Sampling Frequency" }, { 0x003a, 0x0020, "SH", "Group Label" }, { 0x003a, 0x0103, "CS", "Waveform Sample Value 
Representation" }, { 0x003a, 0x0122, "OB", "Waveform Padding Value" }, { 0x003a, 0x0200, "SQ", "Channel Definition" }, { 0x003a, 0x0202, "IS", "Waveform Channel Number" }, { 0x003a, 0x0203, "SH", "Channel Label" }, { 0x003a, 0x0205, "CS", "Channel Status" }, { 0x003a, 0x0208, "SQ", "Channel Source" }, { 0x003a, 0x0209, "SQ", "Channel Source Modifiers" }, { 0x003a, 0x020a, "SQ", "Differential Channel Source" }, { 0x003a, 0x020b, "SQ", "Differential Channel Source Modifiers" }, { 0x003a, 0x0210, "DS", "Channel Sensitivity" }, { 0x003a, 0x0211, "SQ", "Channel Sensitivity Units" }, { 0x003a, 0x0212, "DS", "Channel Sensitivity Correction Factor" }, { 0x003a, 0x0213, "DS", "Channel Baseline" }, { 0x003a, 0x0214, "DS", "Channel Time Skew" }, { 0x003a, 0x0215, "DS", "Channel Sample Skew" }, { 0x003a, 0x0216, "OB", "Channel Minimum Value" }, { 0x003a, 0x0217, "OB", "Channel Maximum Value" }, { 0x003a, 0x0218, "DS", "Channel Offset" }, { 0x003a, 0x021a, "US", "Bits Per Sample" }, { 0x003a, 0x0220, "DS", "Filter Low Frequency" }, { 0x003a, 0x0221, "DS", "Filter High Frequency" }, { 0x003a, 0x0222, "DS", "Notch Filter Frequency" }, { 0x003a, 0x0223, "DS", "Notch Filter Bandwidth" }, { 0x003a, 0x1000, "OB", "Waveform Data" }, { 0x0040, 0x0001, "AE", "Scheduled Station AE Title" }, { 0x0040, 0x0002, "DA", "Scheduled Procedure Step Start Date" }, { 0x0040, 0x0003, "TM", "Scheduled Procedure Step Start Time" }, { 0x0040, 0x0004, "DA", "Scheduled Procedure Step End Date" }, { 0x0040, 0x0005, "TM", "Scheduled Procedure Step End Time" }, { 0x0040, 0x0006, "PN", "Scheduled Performing Physician Name" }, { 0x0040, 0x0007, "LO", "Scheduled Procedure Step Description" }, { 0x0040, 0x0008, "SQ", "Scheduled Action Item Code Sequence" }, { 0x0040, 0x0009, "SH", "Scheduled Procedure Step ID" }, { 0x0040, 0x0010, "SH", "Scheduled Station Name" }, { 0x0040, 0x0011, "SH", "Scheduled Procedure Step Location" }, { 0x0040, 0x0012, "LO", "Pre-Medication" }, { 0x0040, 0x0020, "CS", "Scheduled 
Procedure Step Status" }, { 0x0040, 0x0100, "SQ", "Scheduled Procedure Step Sequence" }, { 0x0040, 0x0302, "US", "Entrance Dose" }, { 0x0040, 0x0303, "US", "Exposed Area" }, { 0x0040, 0x0306, "DS", "Distance Source to Entrance" }, { 0x0040, 0x0307, "DS", "Distance Source to Support" }, { 0x0040, 0x0310, "ST", "Comments On Radiation Dose" }, { 0x0040, 0x0312, "DS", "X-Ray Output" }, { 0x0040, 0x0314, "DS", "Half Value Layer" }, { 0x0040, 0x0316, "DS", "Organ Dose" }, { 0x0040, 0x0318, "CS", "Organ Exposed" }, { 0x0040, 0x0400, "LT", "Comments On Scheduled Procedure Step" }, { 0x0040, 0x050a, "LO", "Specimen Accession Number" }, { 0x0040, 0x0550, "SQ", "Specimen Sequence" }, { 0x0040, 0x0551, "LO", "Specimen Identifier" }, { 0x0040, 0x0552, "SQ", "Specimen Description Sequence" }, { 0x0040, 0x0553, "ST", "Specimen Description" }, { 0x0040, 0x0555, "SQ", "Acquisition Context Sequence" }, { 0x0040, 0x0556, "ST", "Acquisition Context Description" }, { 0x0040, 0x059a, "SQ", "Specimen Type Code Sequence" }, { 0x0040, 0x06fa, "LO", "Slide Identifier" }, { 0x0040, 0x071a, "SQ", "Image Center Point Coordinates Sequence" }, { 0x0040, 0x072a, "DS", "X Offset In Slide Coordinate System" }, { 0x0040, 0x073a, "DS", "Y Offset In Slide Coordinate System" }, { 0x0040, 0x074a, "DS", "Z Offset In Slide Coordinate System" }, { 0x0040, 0x08d8, "SQ", "Pixel Spacing Sequence" }, { 0x0040, 0x08da, "SQ", "Coordinate System Axis Code Sequence" }, { 0x0040, 0x08ea, "SQ", "Measurement Units Code Sequence" }, { 0x0040, 0x09f8, "SQ", "Vital Stain Code Sequence" }, { 0x0040, 0x1001, "SH", "Requested Procedure ID" }, { 0x0040, 0x1002, "LO", "Reason For Requested Procedure" }, { 0x0040, 0x1003, "SH", "Requested Procedure Priority" }, { 0x0040, 0x1004, "LO", "Patient Transport Arrangements" }, { 0x0040, 0x1005, "LO", "Requested Procedure Location" }, { 0x0040, 0x1006, "SH", "Placer Order Number of Procedure" }, { 0x0040, 0x1007, "SH", "Filler Order Number of Procedure" }, { 0x0040, 0x1008, "LO", 
"Confidentiality Code" }, { 0x0040, 0x1009, "SH", "Reporting Priority" }, { 0x0040, 0x1010, "PN", "Names of Intended Recipients of Results" }, { 0x0040, 0x1400, "LT", "Requested Procedure Comments" }, { 0x0040, 0x2001, "LO", "Reason For Imaging Service Request" }, { 0x0040, 0x2004, "DA", "Issue Date of Imaging Service Request" }, { 0x0040, 0x2005, "TM", "Issue Time of Imaging Service Request" }, { 0x0040, 0x2006, "SH", "Placer Order Number of Imaging Service Request" }, { 0x0040, 0x2007, "SH", "Filler Order Number of Imaging Service Request" }, { 0x0040, 0x2008, "PN", "Order Entered By" }, { 0x0040, 0x2009, "SH", "Order Enterer Location" }, { 0x0040, 0x2010, "SH", "Order Callback Phone Number" }, { 0x0040, 0x2400, "LT", "Imaging Service Request Comments" }, { 0x0040, 0x3001, "LO", "Confidentiality Constraint On Patient Data" }, { 0x0040, 0xa007, "CS", "Findings Flag" }, { 0x0040, 0xa020, "SQ", "Findings Sequence" }, { 0x0040, 0xa021, "UI", "Findings Group UID" }, { 0x0040, 0xa022, "UI", "Referenced Findings Group UID" }, { 0x0040, 0xa023, "DA", "Findings Group Recording Date" }, { 0x0040, 0xa024, "TM", "Findings Group Recording Time" }, { 0x0040, 0xa026, "SQ", "Findings Source Category Code Sequence" }, { 0x0040, 0xa027, "LO", "Documenting Organization" }, { 0x0040, 0xa028, "SQ", "Documenting Organization Identifier Code Sequence" }, { 0x0040, 0xa032, "LO", "History Reliability Qualifier Description" }, { 0x0040, 0xa043, "SQ", "Concept Name Code Sequence" }, { 0x0040, 0xa047, "LO", "Measurement Precision Description" }, { 0x0040, 0xa057, "CS", "Urgency or Priority Alerts" }, { 0x0040, 0xa060, "LO", "Sequencing Indicator" }, { 0x0040, 0xa066, "SQ", "Document Identifier Code Sequence" }, { 0x0040, 0xa067, "PN", "Document Author" }, { 0x0040, 0xa068, "SQ", "Document Author Identifier Code Sequence" }, { 0x0040, 0xa070, "SQ", "Identifier Code Sequence" }, { 0x0040, 0xa073, "LO", "Object String Identifier" }, { 0x0040, 0xa074, "OB", "Object Binary Identifier" }, { 
0x0040, 0xa075, "PN", "Documenting Observer" }, { 0x0040, 0xa076, "SQ", "Documenting Observer Identifier Code Sequence" }, { 0x0040, 0xa078, "SQ", "Observation Subject Identifier Code Sequence" }, { 0x0040, 0xa080, "SQ", "Person Identifier Code Sequence" }, { 0x0040, 0xa085, "SQ", "Procedure Identifier Code Sequence" }, { 0x0040, 0xa088, "LO", "Object Directory String Identifier" }, { 0x0040, 0xa089, "OB", "Object Directory Binary Identifier" }, { 0x0040, 0xa090, "CS", "History Reliability Qualifier" }, { 0x0040, 0xa0a0, "CS", "Referenced Type of Data" }, { 0x0040, 0xa0b0, "US", "Referenced Waveform Channels" }, { 0x0040, 0xa110, "DA", "Date of Document or Verbal Transaction" }, { 0x0040, 0xa112, "TM", "Time of Document Creation or Verbal Transaction" }, { 0x0040, 0xa121, "DA", "Date" }, { 0x0040, 0xa122, "TM", "Time" }, { 0x0040, 0xa123, "PN", "Person Name" }, { 0x0040, 0xa124, "SQ", "Referenced Person Sequence" }, { 0x0040, 0xa125, "CS", "Report Status ID" }, { 0x0040, 0xa130, "CS", "Temporal Range Type" }, { 0x0040, 0xa132, "UL", "Referenced Sample Offsets" }, { 0x0040, 0xa136, "US", "Referenced Frame Numbers" }, { 0x0040, 0xa138, "DS", "Referenced Time Offsets" }, { 0x0040, 0xa13a, "DT", "Referenced Datetime" }, { 0x0040, 0xa160, "UT", "Text Value" }, { 0x0040, 0xa167, "SQ", "Observation Category Code Sequence" }, { 0x0040, 0xa168, "SQ", "Concept Code Sequence" }, { 0x0040, 0xa16a, "ST", "Bibliographic Citation" }, { 0x0040, 0xa170, "CS", "Observation Class" }, { 0x0040, 0xa171, "UI", "Observation UID" }, { 0x0040, 0xa172, "UI", "Referenced Observation UID" }, { 0x0040, 0xa173, "CS", "Referenced Observation Class" }, { 0x0040, 0xa174, "CS", "Referenced Object Observation Class" }, { 0x0040, 0xa180, "US", "Annotation Group Number" }, { 0x0040, 0xa192, "DA", "Observation Date" }, { 0x0040, 0xa193, "TM", "Observation Time" }, { 0x0040, 0xa194, "CS", "Measurement Automation" }, { 0x0040, 0xa195, "SQ", "Concept Name Code Sequence Modifier" }, { 0x0040, 0xa224, "ST", 
"Identification Description" }, { 0x0040, 0xa290, "CS", "Coordinates Set Geometric Type" }, { 0x0040, 0xa296, "SQ", "Algorithm Code Sequence" }, { 0x0040, 0xa297, "ST", "Algorithm Description" }, { 0x0040, 0xa29a, "SL", "Pixel Coordinates Set" }, { 0x0040, 0xa300, "SQ", "Measured Value Sequence" }, { 0x0040, 0xa307, "PN", "Current Observer" }, { 0x0040, 0xa30a, "DS", "Numeric Value" }, { 0x0040, 0xa313, "SQ", "Referenced Accession Sequence" }, { 0x0040, 0xa33a, "ST", "Report Status Comment" }, { 0x0040, 0xa340, "SQ", "Procedure Context Sequence" }, { 0x0040, 0xa352, "PN", "Verbal Source" }, { 0x0040, 0xa353, "ST", "Address" }, { 0x0040, 0xa354, "LO", "Telephone Number" }, { 0x0040, 0xa358, "SQ", "Verbal Source Identifier Code Sequence" }, { 0x0040, 0xa380, "SQ", "Report Detail Sequence" }, { 0x0040, 0xa402, "UI", "Observation Subject UID" }, { 0x0040, 0xa403, "CS", "Observation Subject Class" }, { 0x0040, 0xa404, "SQ", "Observation Subject Type Code Sequence" }, { 0x0040, 0xa600, "CS", "Observation Subject Context Flag" }, { 0x0040, 0xa601, "CS", "Observer Context Flag" }, { 0x0040, 0xa603, "CS", "Procedure Context Flag" }, { 0x0040, 0xa730, "SQ", "Observations Sequence" }, { 0x0040, 0xa731, "SQ", "Relationship Sequence" }, { 0x0040, 0xa732, "SQ", "Relationship Type Code Sequence" }, { 0x0040, 0xa744, "SQ", "Language Code Sequence" }, { 0x0040, 0xa992, "ST", "Uniform Resource Locator" }, { 0x0040, 0xb020, "SQ", "Annotation Sequence" }, { 0x0040, 0xdb73, "SQ", "Relationship Type Code Sequence Modifier" }, { 0x0041, 0x0000, "LT", "Papyrus Comments" }, { 0x0041, 0x0010, "xs", "?" }, { 0x0041, 0x0011, "xs", "?" }, { 0x0041, 0x0012, "UL", "Pixel Offset" }, { 0x0041, 0x0013, "SQ", "Image Identifier Sequence" }, { 0x0041, 0x0014, "SQ", "External File Reference Sequence" }, { 0x0041, 0x0015, "US", "Number of Images" }, { 0x0041, 0x0020, "xs", "?" 
}, { 0x0041, 0x0021, "UI", "Referenced SOP Class UID" }, { 0x0041, 0x0022, "UI", "Referenced SOP Instance UID" }, { 0x0041, 0x0030, "xs", "?" }, { 0x0041, 0x0031, "xs", "?" }, { 0x0041, 0x0032, "xs", "?" }, { 0x0041, 0x0034, "DA", "Modified Date" }, { 0x0041, 0x0036, "TM", "Modified Time" }, { 0x0041, 0x0040, "LT", "Owner Name" }, { 0x0041, 0x0041, "UI", "Referenced Image SOP Class UID" }, { 0x0041, 0x0042, "UI", "Referenced Image SOP Instance UID" }, { 0x0041, 0x0050, "xs", "?" }, { 0x0041, 0x0060, "UL", "Number of Images" }, { 0x0041, 0x0062, "UL", "Number of Other" }, { 0x0041, 0x00a0, "LT", "External Folder Element DSID" }, { 0x0041, 0x00a1, "US", "External Folder Element Data Set Type" }, { 0x0041, 0x00a2, "LT", "External Folder Element File Location" }, { 0x0041, 0x00a3, "UL", "External Folder Element Length" }, { 0x0041, 0x00b0, "LT", "Internal Folder Element DSID" }, { 0x0041, 0x00b1, "US", "Internal Folder Element Data Set Type" }, { 0x0041, 0x00b2, "UL", "Internal Offset To Data Set" }, { 0x0041, 0x00b3, "UL", "Internal Offset To Image" }, { 0x0043, 0x0001, "SS", "Bitmap Of Prescan Options" }, { 0x0043, 0x0002, "SS", "Gradient Offset In X" }, { 0x0043, 0x0003, "SS", "Gradient Offset In Y" }, { 0x0043, 0x0004, "SS", "Gradient Offset In Z" }, { 0x0043, 0x0005, "SS", "Image Is Original Or Unoriginal" }, { 0x0043, 0x0006, "SS", "Number Of EPI Shots" }, { 0x0043, 0x0007, "SS", "Views Per Segment" }, { 0x0043, 0x0008, "SS", "Respiratory Rate In BPM" }, { 0x0043, 0x0009, "SS", "Respiratory Trigger Point" }, { 0x0043, 0x000a, "SS", "Type Of Receiver Used" }, { 0x0043, 0x000b, "DS", "Peak Rate Of Change Of Gradient Field" }, { 0x0043, 0x000c, "DS", "Limits In Units Of Percent" }, { 0x0043, 0x000d, "DS", "PSD Estimated Limit" }, { 0x0043, 0x000e, "DS", "PSD Estimated Limit In Tesla Per Second" }, { 0x0043, 0x000f, "DS", "SAR Avg Head" }, { 0x0043, 0x0010, "US", "Window Value" }, { 0x0043, 0x0011, "US", "Total Input Views" }, { 0x0043, 0x0012, "SS", "Xray Chain" }, 
{ 0x0043, 0x0013, "SS", "Recon Kernel Parameters" }, { 0x0043, 0x0014, "SS", "Calibration Parameters" }, { 0x0043, 0x0015, "SS", "Total Output Views" }, { 0x0043, 0x0016, "SS", "Number Of Overranges" }, { 0x0043, 0x0017, "DS", "IBH Image Scale Factors" }, { 0x0043, 0x0018, "DS", "BBH Coefficients" }, { 0x0043, 0x0019, "SS", "Number Of BBH Chains To Blend" }, { 0x0043, 0x001a, "SL", "Starting Channel Number" }, { 0x0043, 0x001b, "SS", "PPScan Parameters" }, { 0x0043, 0x001c, "SS", "GE Image Integrity" }, { 0x0043, 0x001d, "SS", "Level Value" }, { 0x0043, 0x001e, "xs", "?" }, { 0x0043, 0x001f, "SL", "Max Overranges In A View" }, { 0x0043, 0x0020, "DS", "Avg Overranges All Views" }, { 0x0043, 0x0021, "SS", "Corrected Afterglow Terms" }, { 0x0043, 0x0025, "SS", "Reference Channels" }, { 0x0043, 0x0026, "US", "No Views Ref Channels Blocked" }, { 0x0043, 0x0027, "xs", "?" }, { 0x0043, 0x0028, "OB", "Unique Image Identifier" }, { 0x0043, 0x0029, "OB", "Histogram Tables" }, { 0x0043, 0x002a, "OB", "User Defined Data" }, { 0x0043, 0x002b, "SS", "Private Scan Options" }, { 0x0043, 0x002c, "SS", "Effective Echo Spacing" }, { 0x0043, 0x002d, "SH", "String Slop Field 1" }, { 0x0043, 0x002e, "SH", "String Slop Field 2" }, { 0x0043, 0x002f, "SS", "Raw Data Type" }, { 0x0043, 0x0030, "SS", "Raw Data Type" }, { 0x0043, 0x0031, "DS", "RA Coord Of Target Recon Centre" }, { 0x0043, 0x0032, "SS", "Raw Data Type" }, { 0x0043, 0x0033, "FL", "Neg Scan Spacing" }, { 0x0043, 0x0034, "IS", "Offset Frequency" }, { 0x0043, 0x0035, "UL", "User Usage Tag" }, { 0x0043, 0x0036, "UL", "User Fill Map MSW" }, { 0x0043, 0x0037, "UL", "User Fill Map LSW" }, { 0x0043, 0x0038, "FL", "User 25 To User 48" }, { 0x0043, 0x0039, "IS", "Slop Integer 6 To Slop Integer 9" }, { 0x0043, 0x0040, "FL", "Trigger On Position" }, { 0x0043, 0x0041, "FL", "Degree Of Rotation" }, { 0x0043, 0x0042, "SL", "DAS Trigger Source" }, { 0x0043, 0x0043, "SL", "DAS Fpa Gain" }, { 0x0043, 0x0044, "SL", "DAS Output Source" }, { 
0x0043, 0x0045, "SL", "DAS Ad Input" }, { 0x0043, 0x0046, "SL", "DAS Cal Mode" }, { 0x0043, 0x0047, "SL", "DAS Cal Frequency" }, { 0x0043, 0x0048, "SL", "DAS Reg Xm" }, { 0x0043, 0x0049, "SL", "DAS Auto Zero" }, { 0x0043, 0x004a, "SS", "Starting Channel Of View" }, { 0x0043, 0x004b, "SL", "DAS Xm Pattern" }, { 0x0043, 0x004c, "SS", "TGGC Trigger Mode" }, { 0x0043, 0x004d, "FL", "Start Scan To Xray On Delay" }, { 0x0043, 0x004e, "FL", "Duration Of Xray On" }, { 0x0044, 0x0000, "UI", "?" }, { 0x0045, 0x0004, "CS", "AES" }, { 0x0045, 0x0006, "DS", "Angulation" }, { 0x0045, 0x0009, "DS", "Real Magnification Factor" }, { 0x0045, 0x000b, "CS", "Senograph Type" }, { 0x0045, 0x000c, "DS", "Integration Time" }, { 0x0045, 0x000d, "DS", "ROI Origin X and Y" }, { 0x0045, 0x0011, "DS", "Receptor Size cm X and Y" }, { 0x0045, 0x0012, "IS", "Receptor Size Pixels X and Y" }, { 0x0045, 0x0013, "ST", "Screen" }, { 0x0045, 0x0014, "DS", "Pixel Pitch Microns" }, { 0x0045, 0x0015, "IS", "Pixel Depth Bits" }, { 0x0045, 0x0016, "IS", "Binning Factor X and Y" }, { 0x0045, 0x001b, "CS", "Clinical View" }, { 0x0045, 0x001d, "DS", "Mean Of Raw Gray Levels" }, { 0x0045, 0x001e, "DS", "Mean Of Offset Gray Levels" }, { 0x0045, 0x001f, "DS", "Mean Of Corrected Gray Levels" }, { 0x0045, 0x0020, "DS", "Mean Of Region Gray Levels" }, { 0x0045, 0x0021, "DS", "Mean Of Log Region Gray Levels" }, { 0x0045, 0x0022, "DS", "Standard Deviation Of Raw Gray Levels" }, { 0x0045, 0x0023, "DS", "Standard Deviation Of Corrected Gray Levels" }, { 0x0045, 0x0024, "DS", "Standard Deviation Of Region Gray Levels" }, { 0x0045, 0x0025, "DS", "Standard Deviation Of Log Region Gray Levels" }, { 0x0045, 0x0026, "OB", "MAO Buffer" }, { 0x0045, 0x0027, "IS", "Set Number" }, { 0x0045, 0x0028, "CS", "WindowingType (LINEAR or GAMMA)" }, { 0x0045, 0x0029, "DS", "WindowingParameters" }, { 0x0045, 0x002a, "IS", "Crosshair Cursor X Coordinates" }, { 0x0045, 0x002b, "IS", "Crosshair Cursor Y Coordinates" }, { 0x0045, 0x0039, "US", 
"Vignette Rows" }, { 0x0045, 0x003a, "US", "Vignette Columns" }, { 0x0045, 0x003b, "US", "Vignette Bits Allocated" }, { 0x0045, 0x003c, "US", "Vignette Bits Stored" }, { 0x0045, 0x003d, "US", "Vignette High Bit" }, { 0x0045, 0x003e, "US", "Vignette Pixel Representation" }, { 0x0045, 0x003f, "OB", "Vignette Pixel Data" }, { 0x0047, 0x0001, "SQ", "Reconstruction Parameters Sequence" }, { 0x0047, 0x0050, "UL", "Volume Voxel Count" }, { 0x0047, 0x0051, "UL", "Volume Segment Count" }, { 0x0047, 0x0053, "US", "Volume Slice Size" }, { 0x0047, 0x0054, "US", "Volume Slice Count" }, { 0x0047, 0x0055, "SL", "Volume Threshold Value" }, { 0x0047, 0x0057, "DS", "Volume Voxel Ratio" }, { 0x0047, 0x0058, "DS", "Volume Voxel Size" }, { 0x0047, 0x0059, "US", "Volume Z Position Size" }, { 0x0047, 0x0060, "DS", "Volume Base Line" }, { 0x0047, 0x0061, "DS", "Volume Center Point" }, { 0x0047, 0x0063, "SL", "Volume Skew Base" }, { 0x0047, 0x0064, "DS", "Volume Registration Transform Rotation Matrix" }, { 0x0047, 0x0065, "DS", "Volume Registration Transform Translation Vector" }, { 0x0047, 0x0070, "DS", "KVP List" }, { 0x0047, 0x0071, "IS", "XRay Tube Current List" }, { 0x0047, 0x0072, "IS", "Exposure List" }, { 0x0047, 0x0080, "LO", "Acquisition DLX Identifier" }, { 0x0047, 0x0085, "SQ", "Acquisition DLX 2D Series Sequence" }, { 0x0047, 0x0089, "DS", "Contrast Agent Volume List" }, { 0x0047, 0x008a, "US", "Number Of Injections" }, { 0x0047, 0x008b, "US", "Frame Count" }, { 0x0047, 0x0096, "IS", "Used Frames" }, { 0x0047, 0x0091, "LO", "XA 3D Reconstruction Algorithm Name" }, { 0x0047, 0x0092, "CS", "XA 3D Reconstruction Algorithm Version" }, { 0x0047, 0x0093, "DA", "DLX Calibration Date" }, { 0x0047, 0x0094, "TM", "DLX Calibration Time" }, { 0x0047, 0x0095, "CS", "DLX Calibration Status" }, { 0x0047, 0x0098, "US", "Transform Count" }, { 0x0047, 0x0099, "SQ", "Transform Sequence" }, { 0x0047, 0x009a, "DS", "Transform Rotation Matrix" }, { 0x0047, 0x009b, "DS", "Transform Translation 
Vector" }, { 0x0047, 0x009c, "LO", "Transform Label" }, { 0x0047, 0x00b1, "US", "Wireframe Count" }, { 0x0047, 0x00b2, "US", "Location System" }, { 0x0047, 0x00b0, "SQ", "Wireframe List" }, { 0x0047, 0x00b5, "LO", "Wireframe Name" }, { 0x0047, 0x00b6, "LO", "Wireframe Group Name" }, { 0x0047, 0x00b7, "LO", "Wireframe Color" }, { 0x0047, 0x00b8, "SL", "Wireframe Attributes" }, { 0x0047, 0x00b9, "SL", "Wireframe Point Count" }, { 0x0047, 0x00ba, "SL", "Wireframe Timestamp" }, { 0x0047, 0x00bb, "SQ", "Wireframe Point List" }, { 0x0047, 0x00bc, "DS", "Wireframe Points Coordinates" }, { 0x0047, 0x00c0, "DS", "Volume Upper Left High Corner RAS" }, { 0x0047, 0x00c1, "DS", "Volume Slice To RAS Rotation Matrix" }, { 0x0047, 0x00c2, "DS", "Volume Upper Left High Corner TLOC" }, { 0x0047, 0x00d1, "OB", "Volume Segment List" }, { 0x0047, 0x00d2, "OB", "Volume Gradient List" }, { 0x0047, 0x00d3, "OB", "Volume Density List" }, { 0x0047, 0x00d4, "OB", "Volume Z Position List" }, { 0x0047, 0x00d5, "OB", "Volume Original Index List" }, { 0x0050, 0x0000, "UL", "Calibration Group Length" }, { 0x0050, 0x0004, "CS", "Calibration Object" }, { 0x0050, 0x0010, "SQ", "DeviceSequence" }, { 0x0050, 0x0014, "DS", "DeviceLength" }, { 0x0050, 0x0016, "DS", "DeviceDiameter" }, { 0x0050, 0x0017, "CS", "DeviceDiameterUnits" }, { 0x0050, 0x0018, "DS", "DeviceVolume" }, { 0x0050, 0x0019, "DS", "InterMarkerDistance" }, { 0x0050, 0x0020, "LO", "DeviceDescription" }, { 0x0050, 0x0030, "SQ", "CodedInterventionDeviceSequence" }, { 0x0051, 0x0010, "xs", "Image Text" }, { 0x0054, 0x0000, "UL", "Nuclear Acquisition Group Length" }, { 0x0054, 0x0010, "US", "Energy Window Vector" }, { 0x0054, 0x0011, "US", "Number of Energy Windows" }, { 0x0054, 0x0012, "SQ", "Energy Window Information Sequence" }, { 0x0054, 0x0013, "SQ", "Energy Window Range Sequence" }, { 0x0054, 0x0014, "DS", "Energy Window Lower Limit" }, { 0x0054, 0x0015, "DS", "Energy Window Upper Limit" }, { 0x0054, 0x0016, "SQ", "Radiopharmaceutical 
Information Sequence" }, { 0x0054, 0x0017, "IS", "Residual Syringe Counts" }, { 0x0054, 0x0018, "SH", "Energy Window Name" }, { 0x0054, 0x0020, "US", "Detector Vector" }, { 0x0054, 0x0021, "US", "Number of Detectors" }, { 0x0054, 0x0022, "SQ", "Detector Information Sequence" }, { 0x0054, 0x0030, "US", "Phase Vector" }, { 0x0054, 0x0031, "US", "Number of Phases" }, { 0x0054, 0x0032, "SQ", "Phase Information Sequence" }, { 0x0054, 0x0033, "US", "Number of Frames In Phase" }, { 0x0054, 0x0036, "IS", "Phase Delay" }, { 0x0054, 0x0038, "IS", "Pause Between Frames" }, { 0x0054, 0x0050, "US", "Rotation Vector" }, { 0x0054, 0x0051, "US", "Number of Rotations" }, { 0x0054, 0x0052, "SQ", "Rotation Information Sequence" }, { 0x0054, 0x0053, "US", "Number of Frames In Rotation" }, { 0x0054, 0x0060, "US", "R-R Interval Vector" }, { 0x0054, 0x0061, "US", "Number of R-R Intervals" }, { 0x0054, 0x0062, "SQ", "Gated Information Sequence" }, { 0x0054, 0x0063, "SQ", "Data Information Sequence" }, { 0x0054, 0x0070, "US", "Time Slot Vector" }, { 0x0054, 0x0071, "US", "Number of Time Slots" }, { 0x0054, 0x0072, "SQ", "Time Slot Information Sequence" }, { 0x0054, 0x0073, "DS", "Time Slot Time" }, { 0x0054, 0x0080, "US", "Slice Vector" }, { 0x0054, 0x0081, "US", "Number of Slices" }, { 0x0054, 0x0090, "US", "Angular View Vector" }, { 0x0054, 0x0100, "US", "Time Slice Vector" }, { 0x0054, 0x0101, "US", "Number Of Time Slices" }, { 0x0054, 0x0200, "DS", "Start Angle" }, { 0x0054, 0x0202, "CS", "Type of Detector Motion" }, { 0x0054, 0x0210, "IS", "Trigger Vector" }, { 0x0054, 0x0211, "US", "Number of Triggers in Phase" }, { 0x0054, 0x0220, "SQ", "View Code Sequence" }, { 0x0054, 0x0222, "SQ", "View Modifier Code Sequence" }, { 0x0054, 0x0300, "SQ", "Radionuclide Code Sequence" }, { 0x0054, 0x0302, "SQ", "Radiopharmaceutical Route Code Sequence" }, { 0x0054, 0x0304, "SQ", "Radiopharmaceutical Code Sequence" }, { 0x0054, 0x0306, "SQ", "Calibration Data Sequence" }, { 0x0054, 0x0308, "US", 
"Energy Window Number" }, { 0x0054, 0x0400, "SH", "Image ID" }, { 0x0054, 0x0410, "SQ", "Patient Orientation Code Sequence" }, { 0x0054, 0x0412, "SQ", "Patient Orientation Modifier Code Sequence" }, { 0x0054, 0x0414, "SQ", "Patient Gantry Relationship Code Sequence" }, { 0x0054, 0x1000, "CS", "Positron Emission Tomography Series Type" }, { 0x0054, 0x1001, "CS", "Positron Emission Tomography Units" }, { 0x0054, 0x1002, "CS", "Counts Source" }, { 0x0054, 0x1004, "CS", "Reprojection Method" }, { 0x0054, 0x1100, "CS", "Randoms Correction Method" }, { 0x0054, 0x1101, "LO", "Attenuation Correction Method" }, { 0x0054, 0x1102, "CS", "Decay Correction" }, { 0x0054, 0x1103, "LO", "Reconstruction Method" }, { 0x0054, 0x1104, "LO", "Detector Lines of Response Used" }, { 0x0054, 0x1105, "LO", "Scatter Correction Method" }, { 0x0054, 0x1200, "DS", "Axial Acceptance" }, { 0x0054, 0x1201, "IS", "Axial Mash" }, { 0x0054, 0x1202, "IS", "Transverse Mash" }, { 0x0054, 0x1203, "DS", "Detector Element Size" }, { 0x0054, 0x1210, "DS", "Coincidence Window Width" }, { 0x0054, 0x1220, "CS", "Secondary Counts Type" }, { 0x0054, 0x1300, "DS", "Frame Reference Time" }, { 0x0054, 0x1310, "IS", "Primary Prompts Counts Accumulated" }, { 0x0054, 0x1311, "IS", "Secondary Counts Accumulated" }, { 0x0054, 0x1320, "DS", "Slice Sensitivity Factor" }, { 0x0054, 0x1321, "DS", "Decay Factor" }, { 0x0054, 0x1322, "DS", "Dose Calibration Factor" }, { 0x0054, 0x1323, "DS", "Scatter Fraction Factor" }, { 0x0054, 0x1324, "DS", "Dead Time Factor" }, { 0x0054, 0x1330, "US", "Image Index" }, { 0x0054, 0x1400, "CS", "Counts Included" }, { 0x0054, 0x1401, "CS", "Dead Time Correction Flag" }, { 0x0055, 0x0046, "LT", "Current Ward" }, { 0x0058, 0x0000, "SQ", "?" 
}, { 0x0060, 0x3000, "SQ", "Histogram Sequence" }, { 0x0060, 0x3002, "US", "Histogram Number of Bins" }, { 0x0060, 0x3004, "xs", "Histogram First Bin Value" }, { 0x0060, 0x3006, "xs", "Histogram Last Bin Value" }, { 0x0060, 0x3008, "US", "Histogram Bin Width" }, { 0x0060, 0x3010, "LO", "Histogram Explanation" }, { 0x0060, 0x3020, "UL", "Histogram Data" }, { 0x0070, 0x0001, "SQ", "Graphic Annotation Sequence" }, { 0x0070, 0x0002, "CS", "Graphic Layer" }, { 0x0070, 0x0003, "CS", "Bounding Box Annotation Units" }, { 0x0070, 0x0004, "CS", "Anchor Point Annotation Units" }, { 0x0070, 0x0005, "CS", "Graphic Annotation Units" }, { 0x0070, 0x0006, "ST", "Unformatted Text Value" }, { 0x0070, 0x0008, "SQ", "Text Object Sequence" }, { 0x0070, 0x0009, "SQ", "Graphic Object Sequence" }, { 0x0070, 0x0010, "FL", "Bounding Box TLHC" }, { 0x0070, 0x0011, "FL", "Bounding Box BRHC" }, { 0x0070, 0x0014, "FL", "Anchor Point" }, { 0x0070, 0x0015, "CS", "Anchor Point Visibility" }, { 0x0070, 0x0020, "US", "Graphic Dimensions" }, { 0x0070, 0x0021, "US", "Number Of Graphic Points" }, { 0x0070, 0x0022, "FL", "Graphic Data" }, { 0x0070, 0x0023, "CS", "Graphic Type" }, { 0x0070, 0x0024, "CS", "Graphic Filled" }, { 0x0070, 0x0040, "IS", "Image Rotation" }, { 0x0070, 0x0041, "CS", "Image Horizontal Flip" }, { 0x0070, 0x0050, "US", "Displayed Area TLHC" }, { 0x0070, 0x0051, "US", "Displayed Area BRHC" }, { 0x0070, 0x0060, "SQ", "Graphic Layer Sequence" }, { 0x0070, 0x0062, "IS", "Graphic Layer Order" }, { 0x0070, 0x0066, "US", "Graphic Layer Recommended Display Value" }, { 0x0070, 0x0068, "LO", "Graphic Layer Description" }, { 0x0070, 0x0080, "CS", "Presentation Label" }, { 0x0070, 0x0081, "LO", "Presentation Description" }, { 0x0070, 0x0082, "DA", "Presentation Creation Date" }, { 0x0070, 0x0083, "TM", "Presentation Creation Time" }, { 0x0070, 0x0084, "PN", "Presentation Creator's Name" }, { 0x0070, 0x031a, "UI", "Fiducial UID" }, { 0x0087, 0x0010, "CS", "Media Type" }, { 0x0087, 0x0020, "CS", 
"Media Location" }, { 0x0087, 0x0050, "IS", "Estimated Retrieve Time" }, { 0x0088, 0x0000, "UL", "Storage Group Length" }, { 0x0088, 0x0130, "SH", "Storage Media FileSet ID" }, { 0x0088, 0x0140, "UI", "Storage Media FileSet UID" }, { 0x0088, 0x0200, "SQ", "Icon Image Sequence" }, { 0x0088, 0x0904, "LO", "Topic Title" }, { 0x0088, 0x0906, "ST", "Topic Subject" }, { 0x0088, 0x0910, "LO", "Topic Author" }, { 0x0088, 0x0912, "LO", "Topic Key Words" }, { 0x0095, 0x0001, "LT", "Examination Folder ID" }, { 0x0095, 0x0004, "UL", "Folder Reported Status" }, { 0x0095, 0x0005, "LT", "Folder Reporting Radiologist" }, { 0x0095, 0x0007, "LT", "SIENET ISA PLA" }, { 0x0099, 0x0002, "UL", "Data Object Attributes" }, { 0x00e1, 0x0001, "US", "Data Dictionary Version" }, { 0x00e1, 0x0014, "LT", "?" }, { 0x00e1, 0x0022, "DS", "?" }, { 0x00e1, 0x0023, "DS", "?" }, { 0x00e1, 0x0024, "LT", "?" }, { 0x00e1, 0x0025, "LT", "?" }, { 0x00e1, 0x0040, "SH", "Offset From CT MR Images" }, { 0x0193, 0x0002, "DS", "RIS Key" }, { 0x0307, 0x0001, "UN", "RIS Worklist IMGEF" }, { 0x0309, 0x0001, "UN", "RIS Report IMGEF" }, { 0x0601, 0x0000, "SH", "Implementation Version" }, { 0x0601, 0x0020, "DS", "Relative Table Position" }, { 0x0601, 0x0021, "DS", "Relative Table Height" }, { 0x0601, 0x0030, "SH", "Surview Direction" }, { 0x0601, 0x0031, "DS", "Surview Length" }, { 0x0601, 0x0050, "SH", "Image View Type" }, { 0x0601, 0x0070, "DS", "Batch Number" }, { 0x0601, 0x0071, "DS", "Batch Size" }, { 0x0601, 0x0072, "DS", "Batch Slice Number" }, { 0x1000, 0x0000, "xs", "?" }, { 0x1000, 0x0001, "US", "Run Length Triplet" }, { 0x1000, 0x0002, "US", "Huffman Table Size" }, { 0x1000, 0x0003, "US", "Huffman Table Triplet" }, { 0x1000, 0x0004, "US", "Shift Table Size" }, { 0x1000, 0x0005, "US", "Shift Table Triplet" }, { 0x1010, 0x0000, "xs", "?" }, { 0x1369, 0x0000, "US", "?" 
}, { 0x2000, 0x0000, "UL", "Film Session Group Length" }, { 0x2000, 0x0010, "IS", "Number of Copies" }, { 0x2000, 0x0020, "CS", "Print Priority" }, { 0x2000, 0x0030, "CS", "Medium Type" }, { 0x2000, 0x0040, "CS", "Film Destination" }, { 0x2000, 0x0050, "LO", "Film Session Label" }, { 0x2000, 0x0060, "IS", "Memory Allocation" }, { 0x2000, 0x0500, "SQ", "Referenced Film Box Sequence" }, { 0x2010, 0x0000, "UL", "Film Box Group Length" }, { 0x2010, 0x0010, "ST", "Image Display Format" }, { 0x2010, 0x0030, "CS", "Annotation Display Format ID" }, { 0x2010, 0x0040, "CS", "Film Orientation" }, { 0x2010, 0x0050, "CS", "Film Size ID" }, { 0x2010, 0x0060, "CS", "Magnification Type" }, { 0x2010, 0x0080, "CS", "Smoothing Type" }, { 0x2010, 0x0100, "CS", "Border Density" }, { 0x2010, 0x0110, "CS", "Empty Image Density" }, { 0x2010, 0x0120, "US", "Min Density" }, { 0x2010, 0x0130, "US", "Max Density" }, { 0x2010, 0x0140, "CS", "Trim" }, { 0x2010, 0x0150, "ST", "Configuration Information" }, { 0x2010, 0x0500, "SQ", "Referenced Film Session Sequence" }, { 0x2010, 0x0510, "SQ", "Referenced Image Box Sequence" }, { 0x2010, 0x0520, "SQ", "Referenced Basic Annotation Box Sequence" }, { 0x2020, 0x0000, "UL", "Image Box Group Length" }, { 0x2020, 0x0010, "US", "Image Box Position" }, { 0x2020, 0x0020, "CS", "Polarity" }, { 0x2020, 0x0030, "DS", "Requested Image Size" }, { 0x2020, 0x0110, "SQ", "Preformatted Grayscale Image Sequence" }, { 0x2020, 0x0111, "SQ", "Preformatted Color Image Sequence" }, { 0x2020, 0x0130, "SQ", "Referenced Image Overlay Box Sequence" }, { 0x2020, 0x0140, "SQ", "Referenced VOI LUT Box Sequence" }, { 0x2030, 0x0000, "UL", "Annotation Group Length" }, { 0x2030, 0x0010, "US", "Annotation Position" }, { 0x2030, 0x0020, "LO", "Text String" }, { 0x2040, 0x0000, "UL", "Overlay Box Group Length" }, { 0x2040, 0x0010, "SQ", "Referenced Overlay Plane Sequence" }, { 0x2040, 0x0011, "US", "Referenced Overlay Plane Groups" }, { 0x2040, 0x0060, "CS", "Overlay Magnification 
Type" }, { 0x2040, 0x0070, "CS", "Overlay Smoothing Type" }, { 0x2040, 0x0080, "CS", "Overlay Foreground Density" }, { 0x2040, 0x0090, "CS", "Overlay Mode" }, { 0x2040, 0x0100, "CS", "Threshold Density" }, { 0x2040, 0x0500, "SQ", "Referenced Overlay Image Box Sequence" }, { 0x2050, 0x0010, "SQ", "Presentation LUT Sequence" }, { 0x2050, 0x0020, "CS", "Presentation LUT Shape" }, { 0x2100, 0x0000, "UL", "Print Job Group Length" }, { 0x2100, 0x0020, "CS", "Execution Status" }, { 0x2100, 0x0030, "CS", "Execution Status Info" }, { 0x2100, 0x0040, "DA", "Creation Date" }, { 0x2100, 0x0050, "TM", "Creation Time" }, { 0x2100, 0x0070, "AE", "Originator" }, { 0x2100, 0x0500, "SQ", "Referenced Print Job Sequence" }, { 0x2110, 0x0000, "UL", "Printer Group Length" }, { 0x2110, 0x0010, "CS", "Printer Status" }, { 0x2110, 0x0020, "CS", "Printer Status Info" }, { 0x2110, 0x0030, "LO", "Printer Name" }, { 0x2110, 0x0099, "SH", "Print Queue ID" }, { 0x3002, 0x0002, "SH", "RT Image Label" }, { 0x3002, 0x0003, "LO", "RT Image Name" }, { 0x3002, 0x0004, "ST", "RT Image Description" }, { 0x3002, 0x000a, "CS", "Reported Values Origin" }, { 0x3002, 0x000c, "CS", "RT Image Plane" }, { 0x3002, 0x000e, "DS", "X-Ray Image Receptor Angle" }, { 0x3002, 0x0010, "DS", "RTImageOrientation" }, { 0x3002, 0x0011, "DS", "Image Plane Pixel Spacing" }, { 0x3002, 0x0012, "DS", "RT Image Position" }, { 0x3002, 0x0020, "SH", "Radiation Machine Name" }, { 0x3002, 0x0022, "DS", "Radiation Machine SAD" }, { 0x3002, 0x0024, "DS", "Radiation Machine SSD" }, { 0x3002, 0x0026, "DS", "RT Image SID" }, { 0x3002, 0x0028, "DS", "Source to Reference Object Distance" }, { 0x3002, 0x0029, "IS", "Fraction Number" }, { 0x3002, 0x0030, "SQ", "Exposure Sequence" }, { 0x3002, 0x0032, "DS", "Meterset Exposure" }, { 0x3004, 0x0001, "CS", "DVH Type" }, { 0x3004, 0x0002, "CS", "Dose Units" }, { 0x3004, 0x0004, "CS", "Dose Type" }, { 0x3004, 0x0006, "LO", "Dose Comment" }, { 0x3004, 0x0008, "DS", "Normalization Point" }, { 0x3004, 
0x000a, "CS", "Dose Summation Type" }, { 0x3004, 0x000c, "DS", "GridFrame Offset Vector" }, { 0x3004, 0x000e, "DS", "Dose Grid Scaling" }, { 0x3004, 0x0010, "SQ", "RT Dose ROI Sequence" }, { 0x3004, 0x0012, "DS", "Dose Value" }, { 0x3004, 0x0040, "DS", "DVH Normalization Point" }, { 0x3004, 0x0042, "DS", "DVH Normalization Dose Value" }, { 0x3004, 0x0050, "SQ", "DVH Sequence" }, { 0x3004, 0x0052, "DS", "DVH Dose Scaling" }, { 0x3004, 0x0054, "CS", "DVH Volume Units" }, { 0x3004, 0x0056, "IS", "DVH Number of Bins" }, { 0x3004, 0x0058, "DS", "DVH Data" }, { 0x3004, 0x0060, "SQ", "DVH Referenced ROI Sequence" }, { 0x3004, 0x0062, "CS", "DVH ROI Contribution Type" }, { 0x3004, 0x0070, "DS", "DVH Minimum Dose" }, { 0x3004, 0x0072, "DS", "DVH Maximum Dose" }, { 0x3004, 0x0074, "DS", "DVH Mean Dose" }, { 0x3006, 0x0002, "SH", "Structure Set Label" }, { 0x3006, 0x0004, "LO", "Structure Set Name" }, { 0x3006, 0x0006, "ST", "Structure Set Description" }, { 0x3006, 0x0008, "DA", "Structure Set Date" }, { 0x3006, 0x0009, "TM", "Structure Set Time" }, { 0x3006, 0x0010, "SQ", "Referenced Frame of Reference Sequence" }, { 0x3006, 0x0012, "SQ", "RT Referenced Study Sequence" }, { 0x3006, 0x0014, "SQ", "RT Referenced Series Sequence" }, { 0x3006, 0x0016, "SQ", "Contour Image Sequence" }, { 0x3006, 0x0020, "SQ", "Structure Set ROI Sequence" }, { 0x3006, 0x0022, "IS", "ROI Number" }, { 0x3006, 0x0024, "UI", "Referenced Frame of Reference UID" }, { 0x3006, 0x0026, "LO", "ROI Name" }, { 0x3006, 0x0028, "ST", "ROI Description" }, { 0x3006, 0x002a, "IS", "ROI Display Color" }, { 0x3006, 0x002c, "DS", "ROI Volume" }, { 0x3006, 0x0030, "SQ", "RT Related ROI Sequence" }, { 0x3006, 0x0033, "CS", "RT ROI Relationship" }, { 0x3006, 0x0036, "CS", "ROI Generation Algorithm" }, { 0x3006, 0x0038, "LO", "ROI Generation Description" }, { 0x3006, 0x0039, "SQ", "ROI Contour Sequence" }, { 0x3006, 0x0040, "SQ", "Contour Sequence" }, { 0x3006, 0x0042, "CS", "Contour Geometric Type" }, { 0x3006, 0x0044, 
"DS", "Contour SlabT hickness" }, { 0x3006, 0x0045, "DS", "Contour Offset Vector" }, { 0x3006, 0x0046, "IS", "Number of Contour Points" }, { 0x3006, 0x0050, "DS", "Contour Data" }, { 0x3006, 0x0080, "SQ", "RT ROI Observations Sequence" }, { 0x3006, 0x0082, "IS", "Observation Number" }, { 0x3006, 0x0084, "IS", "Referenced ROI Number" }, { 0x3006, 0x0085, "SH", "ROI Observation Label" }, { 0x3006, 0x0086, "SQ", "RT ROI Identification Code Sequence" }, { 0x3006, 0x0088, "ST", "ROI Observation Description" }, { 0x3006, 0x00a0, "SQ", "Related RT ROI Observations Sequence" }, { 0x3006, 0x00a4, "CS", "RT ROI Interpreted Type" }, { 0x3006, 0x00a6, "PN", "ROI Interpreter" }, { 0x3006, 0x00b0, "SQ", "ROI Physical Properties Sequence" }, { 0x3006, 0x00b2, "CS", "ROI Physical Property" }, { 0x3006, 0x00b4, "DS", "ROI Physical Property Value" }, { 0x3006, 0x00c0, "SQ", "Frame of Reference Relationship Sequence" }, { 0x3006, 0x00c2, "UI", "Related Frame of Reference UID" }, { 0x3006, 0x00c4, "CS", "Frame of Reference Transformation Type" }, { 0x3006, 0x00c6, "DS", "Frame of Reference Transformation Matrix" }, { 0x3006, 0x00c8, "LO", "Frame of Reference Transformation Comment" }, { 0x300a, 0x0002, "SH", "RT Plan Label" }, { 0x300a, 0x0003, "LO", "RT Plan Name" }, { 0x300a, 0x0004, "ST", "RT Plan Description" }, { 0x300a, 0x0006, "DA", "RT Plan Date" }, { 0x300a, 0x0007, "TM", "RT Plan Time" }, { 0x300a, 0x0009, "LO", "Treatment Protocols" }, { 0x300a, 0x000a, "CS", "Treatment Intent" }, { 0x300a, 0x000b, "LO", "Treatment Sites" }, { 0x300a, 0x000c, "CS", "RT Plan Geometry" }, { 0x300a, 0x000e, "ST", "Prescription Description" }, { 0x300a, 0x0010, "SQ", "Dose ReferenceSequence" }, { 0x300a, 0x0012, "IS", "Dose ReferenceNumber" }, { 0x300a, 0x0014, "CS", "Dose Reference Structure Type" }, { 0x300a, 0x0016, "LO", "Dose ReferenceDescription" }, { 0x300a, 0x0018, "DS", "Dose Reference Point Coordinates" }, { 0x300a, 0x001a, "DS", "Nominal Prior Dose" }, { 0x300a, 0x0020, "CS", "Dose 
Reference Type" }, { 0x300a, 0x0021, "DS", "Constraint Weight" }, { 0x300a, 0x0022, "DS", "Delivery Warning Dose" }, { 0x300a, 0x0023, "DS", "Delivery Maximum Dose" }, { 0x300a, 0x0025, "DS", "Target Minimum Dose" }, { 0x300a, 0x0026, "DS", "Target Prescription Dose" }, { 0x300a, 0x0027, "DS", "Target Maximum Dose" }, { 0x300a, 0x0028, "DS", "Target Underdose Volume Fraction" }, { 0x300a, 0x002a, "DS", "Organ at Risk Full-volume Dose" }, { 0x300a, 0x002b, "DS", "Organ at Risk Limit Dose" }, { 0x300a, 0x002c, "DS", "Organ at Risk Maximum Dose" }, { 0x300a, 0x002d, "DS", "Organ at Risk Overdose Volume Fraction" }, { 0x300a, 0x0040, "SQ", "Tolerance Table Sequence" }, { 0x300a, 0x0042, "IS", "Tolerance Table Number" }, { 0x300a, 0x0043, "SH", "Tolerance Table Label" }, { 0x300a, 0x0044, "DS", "Gantry Angle Tolerance" }, { 0x300a, 0x0046, "DS", "Beam Limiting Device Angle Tolerance" }, { 0x300a, 0x0048, "SQ", "Beam Limiting Device Tolerance Sequence" }, { 0x300a, 0x004a, "DS", "Beam Limiting Device Position Tolerance" }, { 0x300a, 0x004c, "DS", "Patient Support Angle Tolerance" }, { 0x300a, 0x004e, "DS", "Table Top Eccentric Angle Tolerance" }, { 0x300a, 0x0051, "DS", "Table Top Vertical Position Tolerance" }, { 0x300a, 0x0052, "DS", "Table Top Longitudinal Position Tolerance" }, { 0x300a, 0x0053, "DS", "Table Top Lateral Position Tolerance" }, { 0x300a, 0x0055, "CS", "RT Plan Relationship" }, { 0x300a, 0x0070, "SQ", "Fraction Group Sequence" }, { 0x300a, 0x0071, "IS", "Fraction Group Number" }, { 0x300a, 0x0078, "IS", "Number of Fractions Planned" }, { 0x300a, 0x0079, "IS", "Number of Fractions Per Day" }, { 0x300a, 0x007a, "IS", "Repeat Fraction Cycle Length" }, { 0x300a, 0x007b, "LT", "Fraction Pattern" }, { 0x300a, 0x0080, "IS", "Number of Beams" }, { 0x300a, 0x0082, "DS", "Beam Dose Specification Point" }, { 0x300a, 0x0084, "DS", "Beam Dose" }, { 0x300a, 0x0086, "DS", "Beam Meterset" }, { 0x300a, 0x00a0, "IS", "Number of Brachy Application Setups" }, { 0x300a, 
0x00a2, "DS", "Brachy Application Setup Dose Specification Point" }, { 0x300a, 0x00a4, "DS", "Brachy Application Setup Dose" }, { 0x300a, 0x00b0, "SQ", "Beam Sequence" }, { 0x300a, 0x00b2, "SH", "Treatment Machine Name " }, { 0x300a, 0x00b3, "CS", "Primary Dosimeter Unit" }, { 0x300a, 0x00b4, "DS", "Source-Axis Distance" }, { 0x300a, 0x00b6, "SQ", "Beam Limiting Device Sequence" }, { 0x300a, 0x00b8, "CS", "RT Beam Limiting Device Type" }, { 0x300a, 0x00ba, "DS", "Source to Beam Limiting Device Distance" }, { 0x300a, 0x00bc, "IS", "Number of Leaf/Jaw Pairs" }, { 0x300a, 0x00be, "DS", "Leaf Position Boundaries" }, { 0x300a, 0x00c0, "IS", "Beam Number" }, { 0x300a, 0x00c2, "LO", "Beam Name" }, { 0x300a, 0x00c3, "ST", "Beam Description" }, { 0x300a, 0x00c4, "CS", "Beam Type" }, { 0x300a, 0x00c6, "CS", "Radiation Type" }, { 0x300a, 0x00c8, "IS", "Reference Image Number" }, { 0x300a, 0x00ca, "SQ", "Planned Verification Image Sequence" }, { 0x300a, 0x00cc, "LO", "Imaging Device Specific Acquisition Parameters" }, { 0x300a, 0x00ce, "CS", "Treatment Delivery Type" }, { 0x300a, 0x00d0, "IS", "Number of Wedges" }, { 0x300a, 0x00d1, "SQ", "Wedge Sequence" }, { 0x300a, 0x00d2, "IS", "Wedge Number" }, { 0x300a, 0x00d3, "CS", "Wedge Type" }, { 0x300a, 0x00d4, "SH", "Wedge ID" }, { 0x300a, 0x00d5, "IS", "Wedge Angle" }, { 0x300a, 0x00d6, "DS", "Wedge Factor" }, { 0x300a, 0x00d8, "DS", "Wedge Orientation" }, { 0x300a, 0x00da, "DS", "Source to Wedge Tray Distance" }, { 0x300a, 0x00e0, "IS", "Number of Compensators" }, { 0x300a, 0x00e1, "SH", "Material ID" }, { 0x300a, 0x00e2, "DS", "Total Compensator Tray Factor" }, { 0x300a, 0x00e3, "SQ", "Compensator Sequence" }, { 0x300a, 0x00e4, "IS", "Compensator Number" }, { 0x300a, 0x00e5, "SH", "Compensator ID" }, { 0x300a, 0x00e6, "DS", "Source to Compensator Tray Distance" }, { 0x300a, 0x00e7, "IS", "Compensator Rows" }, { 0x300a, 0x00e8, "IS", "Compensator Columns" }, { 0x300a, 0x00e9, "DS", "Compensator Pixel Spacing" }, { 0x300a, 
0x00ea, "DS", "Compensator Position" }, { 0x300a, 0x00eb, "DS", "Compensator Transmission Data" }, { 0x300a, 0x00ec, "DS", "Compensator Thickness Data" }, { 0x300a, 0x00ed, "IS", "Number of Boli" }, { 0x300a, 0x00f0, "IS", "Number of Blocks" }, { 0x300a, 0x00f2, "DS", "Total Block Tray Factor" }, { 0x300a, 0x00f4, "SQ", "Block Sequence" }, { 0x300a, 0x00f5, "SH", "Block Tray ID" }, { 0x300a, 0x00f6, "DS", "Source to Block Tray Distance" }, { 0x300a, 0x00f8, "CS", "Block Type" }, { 0x300a, 0x00fa, "CS", "Block Divergence" }, { 0x300a, 0x00fc, "IS", "Block Number" }, { 0x300a, 0x00fe, "LO", "Block Name" }, { 0x300a, 0x0100, "DS", "Block Thickness" }, { 0x300a, 0x0102, "DS", "Block Transmission" }, { 0x300a, 0x0104, "IS", "Block Number of Points" }, { 0x300a, 0x0106, "DS", "Block Data" }, { 0x300a, 0x0107, "SQ", "Applicator Sequence" }, { 0x300a, 0x0108, "SH", "Applicator ID" }, { 0x300a, 0x0109, "CS", "Applicator Type" }, { 0x300a, 0x010a, "LO", "Applicator Description" }, { 0x300a, 0x010c, "DS", "Cumulative Dose Reference Coefficient" }, { 0x300a, 0x010e, "DS", "Final Cumulative Meterset Weight" }, { 0x300a, 0x0110, "IS", "Number of Control Points" }, { 0x300a, 0x0111, "SQ", "Control Point Sequence" }, { 0x300a, 0x0112, "IS", "Control Point Index" }, { 0x300a, 0x0114, "DS", "Nominal Beam Energy" }, { 0x300a, 0x0115, "DS", "Dose Rate Set" }, { 0x300a, 0x0116, "SQ", "Wedge Position Sequence" }, { 0x300a, 0x0118, "CS", "Wedge Position" }, { 0x300a, 0x011a, "SQ", "Beam Limiting Device Position Sequence" }, { 0x300a, 0x011c, "DS", "Leaf Jaw Positions" }, { 0x300a, 0x011e, "DS", "Gantry Angle" }, { 0x300a, 0x011f, "CS", "Gantry Rotation Direction" }, { 0x300a, 0x0120, "DS", "Beam Limiting Device Angle" }, { 0x300a, 0x0121, "CS", "Beam Limiting Device Rotation Direction" }, { 0x300a, 0x0122, "DS", "Patient Support Angle" }, { 0x300a, 0x0123, "CS", "Patient Support Rotation Direction" }, { 0x300a, 0x0124, "DS", "Table Top Eccentric Axis Distance" }, { 0x300a, 0x0125, "DS", 
"Table Top Eccentric Angle" }, { 0x300a, 0x0126, "CS", "Table Top Eccentric Rotation Direction" }, { 0x300a, 0x0128, "DS", "Table Top Vertical Position" }, { 0x300a, 0x0129, "DS", "Table Top Longitudinal Position" }, { 0x300a, 0x012a, "DS", "Table Top Lateral Position" }, { 0x300a, 0x012c, "DS", "Isocenter Position" }, { 0x300a, 0x012e, "DS", "Surface Entry Point" }, { 0x300a, 0x0130, "DS", "Source to Surface Distance" }, { 0x300a, 0x0134, "DS", "Cumulative Meterset Weight" }, { 0x300a, 0x0180, "SQ", "Patient Setup Sequence" }, { 0x300a, 0x0182, "IS", "Patient Setup Number" }, { 0x300a, 0x0184, "LO", "Patient Additional Position" }, { 0x300a, 0x0190, "SQ", "Fixation Device Sequence" }, { 0x300a, 0x0192, "CS", "Fixation Device Type" }, { 0x300a, 0x0194, "SH", "Fixation Device Label" }, { 0x300a, 0x0196, "ST", "Fixation Device Description" }, { 0x300a, 0x0198, "SH", "Fixation Device Position" }, { 0x300a, 0x01a0, "SQ", "Shielding Device Sequence" }, { 0x300a, 0x01a2, "CS", "Shielding Device Type" }, { 0x300a, 0x01a4, "SH", "Shielding Device Label" }, { 0x300a, 0x01a6, "ST", "Shielding Device Description" }, { 0x300a, 0x01a8, "SH", "Shielding Device Position" }, { 0x300a, 0x01b0, "CS", "Setup Technique" }, { 0x300a, 0x01b2, "ST", "Setup TechniqueDescription" }, { 0x300a, 0x01b4, "SQ", "Setup Device Sequence" }, { 0x300a, 0x01b6, "CS", "Setup Device Type" }, { 0x300a, 0x01b8, "SH", "Setup Device Label" }, { 0x300a, 0x01ba, "ST", "Setup Device Description" }, { 0x300a, 0x01bc, "DS", "Setup Device Parameter" }, { 0x300a, 0x01d0, "ST", "Setup ReferenceDescription" }, { 0x300a, 0x01d2, "DS", "Table Top Vertical Setup Displacement" }, { 0x300a, 0x01d4, "DS", "Table Top Longitudinal Setup Displacement" }, { 0x300a, 0x01d6, "DS", "Table Top Lateral Setup Displacement" }, { 0x300a, 0x0200, "CS", "Brachy Treatment Technique" }, { 0x300a, 0x0202, "CS", "Brachy Treatment Type" }, { 0x300a, 0x0206, "SQ", "Treatment Machine Sequence" }, { 0x300a, 0x0210, "SQ", "Source Sequence" }, 
{ 0x300a, 0x0212, "IS", "Source Number" }, { 0x300a, 0x0214, "CS", "Source Type" }, { 0x300a, 0x0216, "LO", "Source Manufacturer" }, { 0x300a, 0x0218, "DS", "Active Source Diameter" }, { 0x300a, 0x021a, "DS", "Active Source Length" }, { 0x300a, 0x0222, "DS", "Source Encapsulation Nominal Thickness" }, { 0x300a, 0x0224, "DS", "Source Encapsulation Nominal Transmission" }, { 0x300a, 0x0226, "LO", "Source IsotopeName" }, { 0x300a, 0x0228, "DS", "Source Isotope Half Life" }, { 0x300a, 0x022a, "DS", "Reference Air Kerma Rate" }, { 0x300a, 0x022c, "DA", "Air Kerma Rate Reference Date" }, { 0x300a, 0x022e, "TM", "Air Kerma Rate Reference Time" }, { 0x300a, 0x0230, "SQ", "Application Setup Sequence" }, { 0x300a, 0x0232, "CS", "Application Setup Type" }, { 0x300a, 0x0234, "IS", "Application Setup Number" }, { 0x300a, 0x0236, "LO", "Application Setup Name" }, { 0x300a, 0x0238, "LO", "Application Setup Manufacturer" }, { 0x300a, 0x0240, "IS", "Template Number" }, { 0x300a, 0x0242, "SH", "Template Type" }, { 0x300a, 0x0244, "LO", "Template Name" }, { 0x300a, 0x0250, "DS", "Total Reference Air Kerma" }, { 0x300a, 0x0260, "SQ", "Brachy Accessory Device Sequence" }, { 0x300a, 0x0262, "IS", "Brachy Accessory Device Number" }, { 0x300a, 0x0263, "SH", "Brachy Accessory Device ID" }, { 0x300a, 0x0264, "CS", "Brachy Accessory Device Type" }, { 0x300a, 0x0266, "LO", "Brachy Accessory Device Name" }, { 0x300a, 0x026a, "DS", "Brachy Accessory Device Nominal Thickness" }, { 0x300a, 0x026c, "DS", "Brachy Accessory Device Nominal Transmission" }, { 0x300a, 0x0280, "SQ", "Channel Sequence" }, { 0x300a, 0x0282, "IS", "Channel Number" }, { 0x300a, 0x0284, "DS", "Channel Length" }, { 0x300a, 0x0286, "DS", "Channel Total Time" }, { 0x300a, 0x0288, "CS", "Source Movement Type" }, { 0x300a, 0x028a, "IS", "Number of Pulses" }, { 0x300a, 0x028c, "DS", "Pulse Repetition Interval" }, { 0x300a, 0x0290, "IS", "Source Applicator Number" }, { 0x300a, 0x0291, "SH", "Source Applicator ID" }, { 0x300a, 
0x0292, "CS", "Source Applicator Type" }, { 0x300a, 0x0294, "LO", "Source Applicator Name" }, { 0x300a, 0x0296, "DS", "Source Applicator Length" }, { 0x300a, 0x0298, "LO", "Source Applicator Manufacturer" }, { 0x300a, 0x029c, "DS", "Source Applicator Wall Nominal Thickness" }, { 0x300a, 0x029e, "DS", "Source Applicator Wall Nominal Transmission" }, { 0x300a, 0x02a0, "DS", "Source Applicator Step Size" }, { 0x300a, 0x02a2, "IS", "Transfer Tube Number" }, { 0x300a, 0x02a4, "DS", "Transfer Tube Length" }, { 0x300a, 0x02b0, "SQ", "Channel Shield Sequence" }, { 0x300a, 0x02b2, "IS", "Channel Shield Number" }, { 0x300a, 0x02b3, "SH", "Channel Shield ID" }, { 0x300a, 0x02b4, "LO", "Channel Shield Name" }, { 0x300a, 0x02b8, "DS", "Channel Shield Nominal Thickness" }, { 0x300a, 0x02ba, "DS", "Channel Shield Nominal Transmission" }, { 0x300a, 0x02c8, "DS", "Final Cumulative Time Weight" }, { 0x300a, 0x02d0, "SQ", "Brachy Control Point Sequence" }, { 0x300a, 0x02d2, "DS", "Control Point Relative Position" }, { 0x300a, 0x02d4, "DS", "Control Point 3D Position" }, { 0x300a, 0x02d6, "DS", "Cumulative Time Weight" }, { 0x300c, 0x0002, "SQ", "Referenced RT Plan Sequence" }, { 0x300c, 0x0004, "SQ", "Referenced Beam Sequence" }, { 0x300c, 0x0006, "IS", "Referenced Beam Number" }, { 0x300c, 0x0007, "IS", "Referenced Reference Image Number" }, { 0x300c, 0x0008, "DS", "Start Cumulative Meterset Weight" }, { 0x300c, 0x0009, "DS", "End Cumulative Meterset Weight" }, { 0x300c, 0x000a, "SQ", "Referenced Brachy Application Setup Sequence" }, { 0x300c, 0x000c, "IS", "Referenced Brachy Application Setup Number" }, { 0x300c, 0x000e, "IS", "Referenced Source Number" }, { 0x300c, 0x0020, "SQ", "Referenced Fraction Group Sequence" }, { 0x300c, 0x0022, "IS", "Referenced Fraction Group Number" }, { 0x300c, 0x0040, "SQ", "Referenced Verification Image Sequence" }, { 0x300c, 0x0042, "SQ", "Referenced Reference Image Sequence" }, { 0x300c, 0x0050, "SQ", "Referenced Dose Reference Sequence" }, { 
0x300c, 0x0051, "IS", "Referenced Dose Reference Number" }, { 0x300c, 0x0055, "SQ", "Brachy Referenced Dose Reference Sequence" }, { 0x300c, 0x0060, "SQ", "Referenced Structure Set Sequence" }, { 0x300c, 0x006a, "IS", "Referenced Patient Setup Number" }, { 0x300c, 0x0080, "SQ", "Referenced Dose Sequence" }, { 0x300c, 0x00a0, "IS", "Referenced Tolerance Table Number" }, { 0x300c, 0x00b0, "SQ", "Referenced Bolus Sequence" }, { 0x300c, 0x00c0, "IS", "Referenced Wedge Number" }, { 0x300c, 0x00d0, "IS", "Referenced Compensato rNumber" }, { 0x300c, 0x00e0, "IS", "Referenced Block Number" }, { 0x300c, 0x00f0, "IS", "Referenced Control Point" }, { 0x300e, 0x0002, "CS", "Approval Status" }, { 0x300e, 0x0004, "DA", "Review Date" }, { 0x300e, 0x0005, "TM", "Review Time" }, { 0x300e, 0x0008, "PN", "Reviewer Name" }, { 0x4000, 0x0000, "UL", "Text Group Length" }, { 0x4000, 0x0010, "LT", "Text Arbitrary" }, { 0x4000, 0x4000, "LT", "Text Comments" }, { 0x4008, 0x0000, "UL", "Results Group Length" }, { 0x4008, 0x0040, "SH", "Results ID" }, { 0x4008, 0x0042, "LO", "Results ID Issuer" }, { 0x4008, 0x0050, "SQ", "Referenced Interpretation Sequence" }, { 0x4008, 0x00ff, "CS", "Report Production Status" }, { 0x4008, 0x0100, "DA", "Interpretation Recorded Date" }, { 0x4008, 0x0101, "TM", "Interpretation Recorded Time" }, { 0x4008, 0x0102, "PN", "Interpretation Recorder" }, { 0x4008, 0x0103, "LO", "Reference to Recorded Sound" }, { 0x4008, 0x0108, "DA", "Interpretation Transcription Date" }, { 0x4008, 0x0109, "TM", "Interpretation Transcription Time" }, { 0x4008, 0x010a, "PN", "Interpretation Transcriber" }, { 0x4008, 0x010b, "ST", "Interpretation Text" }, { 0x4008, 0x010c, "PN", "Interpretation Author" }, { 0x4008, 0x0111, "SQ", "Interpretation Approver Sequence" }, { 0x4008, 0x0112, "DA", "Interpretation Approval Date" }, { 0x4008, 0x0113, "TM", "Interpretation Approval Time" }, { 0x4008, 0x0114, "PN", "Physician Approving Interpretation" }, { 0x4008, 0x0115, "LT", "Interpretation 
Diagnosis Description" }, { 0x4008, 0x0117, "SQ", "InterpretationDiagnosis Code Sequence" }, { 0x4008, 0x0118, "SQ", "Results Distribution List Sequence" }, { 0x4008, 0x0119, "PN", "Distribution Name" }, { 0x4008, 0x011a, "LO", "Distribution Address" }, { 0x4008, 0x0200, "SH", "Interpretation ID" }, { 0x4008, 0x0202, "LO", "Interpretation ID Issuer" }, { 0x4008, 0x0210, "CS", "Interpretation Type ID" }, { 0x4008, 0x0212, "CS", "Interpretation Status ID" }, { 0x4008, 0x0300, "ST", "Impressions" }, { 0x4008, 0x4000, "ST", "Results Comments" }, { 0x4009, 0x0001, "LT", "Report ID" }, { 0x4009, 0x0020, "LT", "Report Status" }, { 0x4009, 0x0030, "DA", "Report Creation Date" }, { 0x4009, 0x0070, "LT", "Report Approving Physician" }, { 0x4009, 0x00e0, "LT", "Report Text" }, { 0x4009, 0x00e1, "LT", "Report Author" }, { 0x4009, 0x00e3, "LT", "Reporting Radiologist" }, { 0x5000, 0x0000, "UL", "Curve Group Length" }, { 0x5000, 0x0005, "US", "Curve Dimensions" }, { 0x5000, 0x0010, "US", "Number of Points" }, { 0x5000, 0x0020, "CS", "Type of Data" }, { 0x5000, 0x0022, "LO", "Curve Description" }, { 0x5000, 0x0030, "SH", "Axis Units" }, { 0x5000, 0x0040, "SH", "Axis Labels" }, { 0x5000, 0x0103, "US", "Data Value Representation" }, { 0x5000, 0x0104, "US", "Minimum Coordinate Value" }, { 0x5000, 0x0105, "US", "Maximum Coordinate Value" }, { 0x5000, 0x0106, "SH", "Curve Range" }, { 0x5000, 0x0110, "US", "Curve Data Descriptor" }, { 0x5000, 0x0112, "US", "Coordinate Start Value" }, { 0x5000, 0x0114, "US", "Coordinate Step Value" }, { 0x5000, 0x1001, "CS", "Curve Activation Layer" }, { 0x5000, 0x2000, "US", "Audio Type" }, { 0x5000, 0x2002, "US", "Audio Sample Format" }, { 0x5000, 0x2004, "US", "Number of Channels" }, { 0x5000, 0x2006, "UL", "Number of Samples" }, { 0x5000, 0x2008, "UL", "Sample Rate" }, { 0x5000, 0x200a, "UL", "Total Time" }, { 0x5000, 0x200c, "xs", "Audio Sample Data" }, { 0x5000, 0x200e, "LT", "Audio Comments" }, { 0x5000, 0x2500, "LO", "Curve Label" }, { 0x5000, 
0x2600, "SQ", "CurveReferenced Overlay Sequence" }, { 0x5000, 0x2610, "US", "CurveReferenced Overlay Group" }, { 0x5000, 0x3000, "OW", "Curve Data" }, { 0x6000, 0x0000, "UL", "Overlay Group Length" }, { 0x6000, 0x0001, "US", "Gray Palette Color Lookup Table Descriptor" }, { 0x6000, 0x0002, "US", "Gray Palette Color Lookup Table Data" }, { 0x6000, 0x0010, "US", "Overlay Rows" }, { 0x6000, 0x0011, "US", "Overlay Columns" }, { 0x6000, 0x0012, "US", "Overlay Planes" }, { 0x6000, 0x0015, "IS", "Number of Frames in Overlay" }, { 0x6000, 0x0022, "LO", "Overlay Description" }, { 0x6000, 0x0040, "CS", "Overlay Type" }, { 0x6000, 0x0045, "CS", "Overlay Subtype" }, { 0x6000, 0x0050, "SS", "Overlay Origin" }, { 0x6000, 0x0051, "US", "Image Frame Origin" }, { 0x6000, 0x0052, "US", "Plane Origin" }, { 0x6000, 0x0060, "LO", "Overlay Compression Code" }, { 0x6000, 0x0061, "SH", "Overlay Compression Originator" }, { 0x6000, 0x0062, "SH", "Overlay Compression Label" }, { 0x6000, 0x0063, "SH", "Overlay Compression Description" }, { 0x6000, 0x0066, "AT", "Overlay Compression Step Pointers" }, { 0x6000, 0x0068, "US", "Overlay Repeat Interval" }, { 0x6000, 0x0069, "US", "Overlay Bits Grouped" }, { 0x6000, 0x0100, "US", "Overlay Bits Allocated" }, { 0x6000, 0x0102, "US", "Overlay Bit Position" }, { 0x6000, 0x0110, "LO", "Overlay Format" }, { 0x6000, 0x0200, "xs", "Overlay Location" }, { 0x6000, 0x0800, "LO", "Overlay Code Label" }, { 0x6000, 0x0802, "US", "Overlay Number of Tables" }, { 0x6000, 0x0803, "AT", "Overlay Code Table Location" }, { 0x6000, 0x0804, "US", "Overlay Bits For Code Word" }, { 0x6000, 0x1001, "CS", "Overlay Activation Layer" }, { 0x6000, 0x1100, "US", "Overlay Descriptor - Gray" }, { 0x6000, 0x1101, "US", "Overlay Descriptor - Red" }, { 0x6000, 0x1102, "US", "Overlay Descriptor - Green" }, { 0x6000, 0x1103, "US", "Overlay Descriptor - Blue" }, { 0x6000, 0x1200, "US", "Overlays - Gray" }, { 0x6000, 0x1201, "US", "Overlays - Red" }, { 0x6000, 0x1202, "US", "Overlays - 
Green" }, { 0x6000, 0x1203, "US", "Overlays - Blue" }, { 0x6000, 0x1301, "IS", "ROI Area" }, { 0x6000, 0x1302, "DS", "ROI Mean" }, { 0x6000, 0x1303, "DS", "ROI Standard Deviation" }, { 0x6000, 0x1500, "LO", "Overlay Label" }, { 0x6000, 0x3000, "OW", "Overlay Data" }, { 0x6000, 0x4000, "LT", "Overlay Comments" }, { 0x6001, 0x0000, "UN", "?" }, { 0x6001, 0x0010, "LO", "?" }, { 0x6001, 0x1010, "xs", "?" }, { 0x6001, 0x1030, "xs", "?" }, { 0x6021, 0x0000, "xs", "?" }, { 0x6021, 0x0010, "xs", "?" }, { 0x7001, 0x0010, "LT", "Dummy" }, { 0x7003, 0x0010, "LT", "Info" }, { 0x7005, 0x0010, "LT", "Dummy" }, { 0x7000, 0x0004, "ST", "TextAnnotation" }, { 0x7000, 0x0005, "IS", "Box" }, { 0x7000, 0x0007, "IS", "ArrowEnd" }, { 0x7001, 0x0001, "SL", "Private Group Length To End" }, { 0x7001, 0x0002, "OB", "Unknown" }, { 0x7001, 0x0011, "SL", "Private Creator" }, { 0x7001, 0x0021, "SL", "Private Creator" }, { 0x7001, 0x0022, "SQ", "Private Creator" }, { 0x7001, 0x0041, "SL", "Private Creator" }, { 0x7001, 0x0042, "SL", "Private Creator" }, { 0x7001, 0x0051, "SL", "Private Creator" }, { 0x7001, 0x0052, "SL", "Private Creator" }, { 0x7001, 0x0075, "SL", "Private Creator" }, { 0x7001, 0x0076, "SL", "Private Creator" }, { 0x7001, 0x0077, "OB", "Private Creator" }, { 0x7001, 0x0101, "SL", "Unknown" }, { 0x7001, 0x0121, "SL", "Unknown" }, { 0x7001, 0x0122, "SQ", "Unknown" }, { 0x7fe0, 0x0000, "UL", "Pixel Data Group Length" }, { 0x7fe0, 0x0010, "xs", "Pixel Data" }, { 0x7fe0, 0x0020, "OW", "Coefficients SDVN" }, { 0x7fe0, 0x0030, "OW", "Coefficients SDHN" }, { 0x7fe0, 0x0040, "OW", "Coefficients SDDN" }, { 0x7fe1, 0x0010, "xs", "Pixel Data" }, { 0x7f00, 0x0000, "UL", "Variable Pixel Data Group Length" }, { 0x7f00, 0x0010, "xs", "Variable Pixel Data" }, { 0x7f00, 0x0011, "US", "Variable Next Data Group" }, { 0x7f00, 0x0020, "OW", "Variable Coefficients SDVN" }, { 0x7f00, 0x0030, "OW", "Variable Coefficients SDHN" }, { 0x7f00, 0x0040, "OW", "Variable Coefficients SDDN" }, { 0x7fe1, 0x0000, 
"OB", "Binary Data" },
    { 0x7fe3, 0x0000, "LT", "Image Graphics Format Code" },
    { 0x7fe3, 0x0010, "OB", "Image Graphics" },
    { 0x7fe3, 0x0020, "OB", "Image Graphics Dummy" },
    { 0x7ff1, 0x0001, "US", "?" },
    { 0x7ff1, 0x0002, "US", "?" },
    { 0x7ff1, 0x0003, "xs", "?" },
    { 0x7ff1, 0x0004, "IS", "?" },
    { 0x7ff1, 0x0005, "US", "?" },
    { 0x7ff1, 0x0007, "US", "?" },
    { 0x7ff1, 0x0008, "US", "?" },
    { 0x7ff1, 0x0009, "US", "?" },
    { 0x7ff1, 0x000a, "LT", "?" },
    { 0x7ff1, 0x000b, "US", "?" },
    { 0x7ff1, 0x000c, "US", "?" },
    { 0x7ff1, 0x000d, "US", "?" },
    { 0x7ff1, 0x0010, "US", "?" },
    { 0xfffc, 0xfffc, "OB", "Data Set Trailing Padding" },
    { 0xfffe, 0xe000, "!!", "Item" },
    { 0xfffe, 0xe00d, "!!", "Item Delimitation Item" },
    { 0xfffe, 0xe0dd, "!!", "Sequence Delimitation Item" },
    /* Sentinel entry: description == NULL terminates table scans. */
    { 0xffff, 0xffff, "xs", (char *) NULL }
  };

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s D C M                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsDCM() returns MagickTrue if the image format type, identified by the
%  magick string, is DCM.
%
%  The format of the IsDCM method is:
%
%      MagickBooleanType IsDCM(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDCM(const unsigned char *magick,const size_t length)
{
  /*
    A DICOM Part 10 file has a 128-byte preamble followed by the literal
    "DICM" at offset 128, so at least 132 bytes are required to decide.
  */
  if (length < 132)
    return(MagickFalse);
  if (LocaleNCompare((char *) (magick+128),"DICM",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d D C M I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadDCMImage() reads a Digital Imaging and Communications in Medicine
%  (DICOM) file and returns it.  It allocates the memory necessary for the
%  new Image structure and returns a pointer to the new image.
%
%  The format of the ReadDCMImage method is:
%
%      Image *ReadDCMImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Decoded per-image DICOM attributes gathered while parsing the data
  elements, consumed later by ReadDCMPixels().
*/
typedef struct _DCMInfo
{
  MagickBooleanType
    polarity;            /* MagickTrue for MONOCHROME1 (inverted) photometric */

  Quantum
    *scale;              /* optional lookup table mapping raw samples to Quantum */

  size_t
    bits_allocated,      /* (0028,0100) Bits Allocated */
    bytes_per_pixel,     /* 1 or 2, derived from bits allocated/stored */
    depth,               /* effective sample depth in bits */
    mask,                /* bit mask applied to every decoded sample */
    max_value,           /* maximum representable sample value */
    samples_per_pixel,   /* (0028,0002): 1 = indexed/gray, 3 = RGB */
    signed_data,         /* (0028,0103) Pixel Representation: 1 = signed */
    significant_bits;    /* (0028,0101) Bits Stored */

  MagickBooleanType
    rescale;             /* apply rescale slope/intercept + windowing */

  double
    rescale_intercept,   /* (0028,1052) */
    rescale_slope,       /* (0028,1053) */
    window_center,       /* (0028,1050) VOI window center */
    window_width;        /* (0028,1051) VOI window width */
} DCMInfo;

/*
  State for the DICOM RLE (PackBits-style) segment decoder.
*/
typedef struct _DCMStreamInfo
{
  size_t
    remaining,           /* bytes left in the current RLE fragment */
    segment_count;       /* number of RLE segments (per segment header) */

  ssize_t
    segments[15];        /* segment start offsets from the RLE header */

  size_t
    offset_count;        /* number of entries in the basic offset table */

  ssize_t
    *offsets;            /* frame offsets (basic offset table) */

  ssize_t
    count;               /* bytes still pending from the current RLE run */

  int
    byte;                /* repeat byte for a replicate run, -1 for literal */
} DCMStreamInfo;

/*
  Return the next pixel byte, transparently decoding DICOM RLE runs when the
  image uses RLE transfer syntax; otherwise a plain blob read.
  NOTE(review): an EOF from ReadBlobByte() yields -1 here, which a replicate
  run header would turn into a large run count -- confirm upstream handling.
*/
static int ReadDCMByte(DCMStreamInfo *stream_info,Image *image)
{
  if (image->compression != RLECompression)
    return(ReadBlobByte(image));
  if (stream_info->count == 0)
    {
      int
        byte;

      ssize_t
        count;

      /* Each run header consumes two bytes of the segment. */
      if (stream_info->remaining <= 2)
        stream_info->remaining=0;
      else
        stream_info->remaining-=2;
      count=(ssize_t) ReadBlobByte(image);
      byte=ReadBlobByte(image);
      if (count == 128)
        return(0);  /* 128 is a no-op marker in PackBits-style RLE */
      else
        if (count < 128)
          {
            /*
              Literal run: the next count bytes are copied verbatim; byte
              just read is the first of them.
            */
            stream_info->count=count;
            stream_info->byte=(-1);
            return(byte);
          }
        else
          {
            /*
              Replicate run: byte is repeated 256-count more times.
            */
            stream_info->count=256-count;
            stream_info->byte=byte;
            return(byte);
          }
    }
  stream_info->count--;
  if (stream_info->byte >= 0)
    return(stream_info->byte);  /* inside a replicate run */
  if (stream_info->remaining > 0)
    stream_info->remaining--;
  return(ReadBlobByte(image));  /* inside a literal run */
}

/*
  Read a 16-bit little-endian sample, via the RLE decoder when applicable.
  NOTE(review): for RLE data with depth < 16 the high byte is shifted by 4
  (packed 12-bit samples); verify against the 12-bit allocation path.
*/
static unsigned short ReadDCMShort(DCMStreamInfo *stream_info,Image *image)
{
  int
    shift,
    byte;

  unsigned short
    value;

  if (image->compression != RLECompression)
    return(ReadBlobLSBShort(image));
  shift=image->depth < 16 ? 4 : 8;
  value=(unsigned short) ReadDCMByte(stream_info,image);
  byte=ReadDCMByte(stream_info,image);
  if (byte < 0)
    return(0);  /* propagate EOF as zero sample */
  value|=(unsigned short) (byte << shift);
  return(value);
}

/*
  Reinterpret a 16-bit sample as signed via a union (avoids implementation-
  defined narrowing conversions).
*/
static signed short ReadDCMSignedShort(DCMStreamInfo *stream_info,Image *image)
{
  union
  {
    unsigned short
      unsigned_value;

    signed short
      signed_value;
  } quantum;

  quantum.unsigned_value=ReadDCMShort(stream_info,image);
  return(quantum.signed_value);
}

/*
  Decode one segment of pixel data into the image.  For samples_per_pixel==1
  the sample is (optionally) rescaled and windowed, then used as a colormap
  index; for color data the three channels are read directly.  When
  first_segment is MagickFalse, newly read samples form the high byte of the
  value accumulated from the previous segment (multi-segment RLE).
  Returns MagickTrue on success, MagickFalse when a pixel-cache or progress
  callback fails.
  NOTE(review): the indexed path assumes image->colormap has already been
  allocated and sized by the caller (index is clamped through
  ConstrainColormapIndex first) -- confirm against ReadDCMImage.
*/
static MagickBooleanType ReadDCMPixels(Image *image,DCMInfo *info,
  DCMStreamInfo *stream_info,MagickBooleanType first_segment,
  ExceptionInfo *exception)
{
  int
    byte,
    index;

  MagickBooleanType
    status;

  PixelPacket
    pixel;

  register ssize_t
    i,
    x;

  register Quantum
    *q;

  ssize_t
    y;

  /*
    Convert DCM Medical image to pixel packets.
  */
  byte=0;
  i=0;
  status=MagickTrue;
  (void) memset(&pixel,0,sizeof(pixel));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (info->samples_per_pixel == 1)
        {
          int
            pixel_value;

          if (info->bytes_per_pixel == 1)
            /* 8-bit sample; MONOCHROME1 is inverted against max_value. */
            pixel_value=info->polarity != MagickFalse ?
              ((int) info->max_value-ReadDCMByte(stream_info,image)) :
              ReadDCMByte(stream_info,image);
          else
            if ((info->bits_allocated != 12) || (info->significant_bits != 12))
              {
                /* Ordinary 16-bit sample. */
                if (info->signed_data)
                  pixel_value=ReadDCMSignedShort(stream_info,image);
                else
                  pixel_value=(int) ReadDCMShort(stream_info,image);
                if (info->polarity != MagickFalse)
                  pixel_value=(int)info->max_value-pixel_value;
              }
            else
              {
                /*
                  Packed 12-bit samples: two pixels share three bytes; the
                  low nibble of an even read is held in 'byte' for the
                  following odd read.
                */
                if ((i & 0x01) != 0)
                  pixel_value=(ReadDCMByte(stream_info,image) << 8) |
                    byte;
                else
                  {
                    pixel_value=ReadDCMSignedShort(stream_info,image);
                    byte=(int) (pixel_value & 0x0f);
                    pixel_value>>=4;
                  }
                i++;
              }
          /* Shift signed data into the unsigned index range. */
          if (info->signed_data == 1)
            pixel_value-=32767;
          index=pixel_value;
          if (info->rescale != MagickFalse)
            {
              double
                scaled_value;

              /* Modality LUT: stored value -> output units. */
              scaled_value=pixel_value*info->rescale_slope+
                info->rescale_intercept;
              index=(int) scaled_value;
              if (info->window_width != 0)
                {
                  double
                    window_max,
                    window_min;

                  /*
                    VOI linear windowing.  NOTE(review): compare the center
                    offset term against DICOM PS3.3 C.11.2.1.2, and note
                    window_width==1 makes the divisor zero -- verify inputs
                    are constrained upstream.
                  */
                  window_min=ceil(info->window_center-
                    (info->window_width-1.0)/2.0-0.5);
                  window_max=floor(info->window_center+
                    (info->window_width-1.0)/2.0+0.5);
                  if (scaled_value <= window_min)
                    index=0;
                  else
                    if (scaled_value > window_max)
                      index=(int) info->max_value;
                    else
                      index=(int) (info->max_value*(((scaled_value-
                        info->window_center-0.5)/(info->window_width-1))+
                        0.5));
                }
            }
          index&=info->mask;
          /* Clamp into the colormap before dereferencing it. */
          index=(int) ConstrainColormapIndex(image,(ssize_t) index,exception);
          if (first_segment)
            SetPixelIndex(image,(Quantum) index,q);
          else
            /* Subsequent segment supplies the high byte of the index. */
            SetPixelIndex(image,(Quantum) (((size_t) index) |
              (((size_t) GetPixelIndex(image,q)) << 8)),q);
          pixel.red=(unsigned int) image->colormap[index].red;
          pixel.green=(unsigned int) image->colormap[index].green;
          pixel.blue=(unsigned int) image->colormap[index].blue;
        }
      else
        {
          /* Direct color samples (e.g. RGB). */
          if (info->bytes_per_pixel == 1)
            {
              pixel.red=(unsigned int) ReadDCMByte(stream_info,image);
              pixel.green=(unsigned int) ReadDCMByte(stream_info,image);
              pixel.blue=(unsigned int) ReadDCMByte(stream_info,image);
            }
          else
            {
              pixel.red=ReadDCMShort(stream_info,image);
              pixel.green=ReadDCMShort(stream_info,image);
              pixel.blue=ReadDCMShort(stream_info,image);
            }
          pixel.red&=info->mask;
          pixel.green&=info->mask;
          pixel.blue&=info->mask;
          if (info->scale != (Quantum *) NULL)
            {
              /* Range-checked LUT remap; out-of-range samples pass through. */
              if ((MagickSizeType) pixel.red <= GetQuantumRange(info->depth))
                pixel.red=info->scale[pixel.red];
              if ((MagickSizeType) pixel.green <= GetQuantumRange(info->depth))
                pixel.green=info->scale[pixel.green];
              if ((MagickSizeType) pixel.blue <= GetQuantumRange(info->depth))
                pixel.blue=info->scale[pixel.blue];
            }
        }
      if (first_segment != MagickFalse)
        {
          SetPixelRed(image,(Quantum) pixel.red,q);
          SetPixelGreen(image,(Quantum) pixel.green,q);
          SetPixelBlue(image,(Quantum) pixel.blue,q);
        }
      else
        {
          /* Merge this segment's bytes as the high byte of each channel. */
          SetPixelRed(image,(Quantum) (((size_t) pixel.red) |
            (((size_t) GetPixelRed(image,q)) << 8)),q);
          SetPixelGreen(image,(Quantum) (((size_t) pixel.green) |
            (((size_t) GetPixelGreen(image,q)) << 8)),q);
          SetPixelBlue(image,(Quantum) (((size_t) pixel.blue) |
            (((size_t) GetPixelBlue(image,q)) << 8)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
    if (image->previous == (Image *) NULL)
      {
        status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
          image->rows);
        if (status == MagickFalse)
          break;
      }
  }
  return(status);
}

static Image *ReadDCMImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
/*
  Release every allocation owned by this reader before throwing; safe to
  invoke at any point because all pointers start NULL and are reset to NULL
  as they are freed.
*/
#define ThrowDCMException(exception,message) \
{ \
  if (info.scale != (Quantum *) NULL) \
    info.scale=(Quantum *) RelinquishMagickMemory(info.scale); \
  if (data != (unsigned char *) NULL) \
    data=(unsigned char *) RelinquishMagickMemory(data); \
  if (graymap != (int *) NULL) \
    graymap=(int *) RelinquishMagickMemory(graymap); \
  if (bluemap != (int *) NULL) \
    bluemap=(int *) RelinquishMagickMemory(bluemap); \
  if (greenmap != (int *) NULL) \
    greenmap=(int *) RelinquishMagickMemory(greenmap); \
  if (redmap != (int *) NULL) \
    redmap=(int *) RelinquishMagickMemory(redmap); \
  if (stream_info->offsets != (ssize_t *) NULL) \
    stream_info->offsets=(ssize_t *) RelinquishMagickMemory( \
      stream_info->offsets); \
  if (stream_info != (DCMStreamInfo *) NULL) \
stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info); \ ThrowReaderException((exception),(message)); \ } char explicit_vr[MagickPathExtent], implicit_vr[MagickPathExtent], magick[MagickPathExtent], photometric[MagickPathExtent]; DCMInfo info; DCMStreamInfo *stream_info; Image *image; int *bluemap, datum, *greenmap, *graymap, *redmap; MagickBooleanType explicit_file, explicit_retry, use_explicit; MagickOffsetType offset; register unsigned char *p; register ssize_t i; size_t colors, height, length, number_scenes, quantum, status, width; ssize_t count, scene; unsigned char *data; unsigned short group, element; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } image->depth=8UL; image->endian=LSBEndian; /* Read DCM preamble. */ (void) memset(&info,0,sizeof(info)); data=(unsigned char *) NULL; graymap=(int *) NULL; redmap=(int *) NULL; greenmap=(int *) NULL; bluemap=(int *) NULL; stream_info=(DCMStreamInfo *) AcquireMagickMemory(sizeof(*stream_info)); if (stream_info == (DCMStreamInfo *) NULL) ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(stream_info,0,sizeof(*stream_info)); count=ReadBlob(image,128,(unsigned char *) magick); if (count != 128) ThrowDCMException(CorruptImageError,"ImproperImageHeader"); count=ReadBlob(image,4,(unsigned char *) magick); if ((count != 4) || (LocaleNCompare(magick,"DICM",4) != 0)) { offset=SeekBlob(image,0L,SEEK_SET); if (offset < 0) ThrowDCMException(CorruptImageError,"ImproperImageHeader"); } /* Read DCM Medical image. 
*/ (void) CopyMagickString(photometric,"MONOCHROME1 ",MagickPathExtent); info.bits_allocated=8; info.bytes_per_pixel=1; info.depth=8; info.mask=0xffff; info.max_value=255UL; info.samples_per_pixel=1; info.signed_data=(~0UL); info.rescale_slope=1.0; data=(unsigned char *) NULL; element=0; explicit_vr[2]='\0'; explicit_file=MagickFalse; colors=0; redmap=(int *) NULL; greenmap=(int *) NULL; bluemap=(int *) NULL; graymap=(int *) NULL; height=0; number_scenes=1; use_explicit=MagickFalse; explicit_retry = MagickFalse; width=0; while (TellBlob(image) < (MagickOffsetType) GetBlobSize(image)) { for (group=0; (group != 0x7FE0) || (element != 0x0010) ; ) { /* Read a group. */ image->offset=(ssize_t) TellBlob(image); group=ReadBlobLSBShort(image); element=ReadBlobLSBShort(image); if ((group == 0xfffc) && (element == 0xfffc)) break; if ((group != 0x0002) && (image->endian == MSBEndian)) { group=(unsigned short) ((group << 8) | ((group >> 8) & 0xFF)); element=(unsigned short) ((element << 8) | ((element >> 8) & 0xFF)); } quantum=0; /* Find corresponding VR for this group and element. */ for (i=0; dicom_info[i].group < 0xffff; i++) if ((group == dicom_info[i].group) && (element == dicom_info[i].element)) break; (void) CopyMagickString(implicit_vr,dicom_info[i].vr,MagickPathExtent); count=ReadBlob(image,2,(unsigned char *) explicit_vr); if (count != 2) ThrowDCMException(CorruptImageError,"ImproperImageHeader"); /* Check for "explicitness", but meta-file headers always explicit. */ if ((explicit_file == MagickFalse) && (group != 0x0002)) explicit_file=(isupper((unsigned char) *explicit_vr) != MagickFalse) && (isupper((unsigned char) *(explicit_vr+1)) != MagickFalse) ? MagickTrue : MagickFalse; use_explicit=((group == 0x0002) && (explicit_retry == MagickFalse)) || (explicit_file != MagickFalse) ? 
MagickTrue : MagickFalse; if ((use_explicit != MagickFalse) && (strncmp(implicit_vr,"xs",2) == 0)) (void) CopyMagickString(implicit_vr,explicit_vr,MagickPathExtent); if ((use_explicit == MagickFalse) || (strncmp(implicit_vr,"!!",2) == 0)) { offset=SeekBlob(image,(MagickOffsetType) -2,SEEK_CUR); if (offset < 0) ThrowDCMException(CorruptImageError,"ImproperImageHeader"); quantum=4; } else { /* Assume explicit type. */ quantum=2; if ((strncmp(explicit_vr,"OB",2) == 0) || (strncmp(explicit_vr,"UN",2) == 0) || (strncmp(explicit_vr,"OW",2) == 0) || (strncmp(explicit_vr,"SQ",2) == 0)) { (void) ReadBlobLSBShort(image); quantum=4; } } datum=0; if (quantum == 4) { if (group == 0x0002) datum=ReadBlobLSBSignedLong(image); else datum=ReadBlobSignedLong(image); } else if (quantum == 2) { if (group == 0x0002) datum=ReadBlobLSBSignedShort(image); else datum=ReadBlobSignedShort(image); } quantum=0; length=1; if (datum != 0) { if ((strncmp(implicit_vr,"OW",2) == 0) || (strncmp(implicit_vr,"SS",2) == 0) || (strncmp(implicit_vr,"US",2) == 0)) quantum=2; else if ((strncmp(implicit_vr,"FL",2) == 0) || (strncmp(implicit_vr,"OF",2) == 0) || (strncmp(implicit_vr,"SL",2) == 0) || (strncmp(implicit_vr,"UL",2) == 0)) quantum=4; else if (strncmp(implicit_vr,"FD",2) == 0) quantum=8; else quantum=1; if (datum != ~0) length=(size_t) datum/quantum; else { /* Sequence and item of undefined length. */ quantum=0; length=0; } } if (image_info->verbose != MagickFalse) { /* Display Dicom info. 
*/ if (use_explicit == MagickFalse) explicit_vr[0]='\0'; for (i=0; dicom_info[i].description != (char *) NULL; i++) if ((group == dicom_info[i].group) && (element == dicom_info[i].element)) break; (void) FormatLocaleFile(stdout,"0x%04lX %4ld %s-%s (0x%04lx,0x%04lx)", (unsigned long) image->offset,(long) length,implicit_vr,explicit_vr, (unsigned long) group,(unsigned long) element); if (dicom_info[i].description != (char *) NULL) (void) FormatLocaleFile(stdout," %s",dicom_info[i].description); (void) FormatLocaleFile(stdout,": "); } if ((group == 0x7FE0) && (element == 0x0010)) { if (image_info->verbose != MagickFalse) (void) FormatLocaleFile(stdout,"\n"); break; } /* Allocate space and read an array. */ data=(unsigned char *) NULL; if ((length == 1) && (quantum == 1)) datum=ReadBlobByte(image); else if ((length == 1) && (quantum == 2)) { if (group == 0x0002) datum=ReadBlobLSBSignedShort(image); else datum=ReadBlobSignedShort(image); } else if ((length == 1) && (quantum == 4)) { if (group == 0x0002) datum=ReadBlobLSBSignedLong(image); else datum=ReadBlobSignedLong(image); } else if ((quantum != 0) && (length != 0)) { if (length > (size_t) GetBlobSize(image)) ThrowDCMException(CorruptImageError, "InsufficientImageDataInFile"); if (~length >= 1) data=(unsigned char *) AcquireQuantumMemory(length+1,quantum* sizeof(*data)); if (data == (unsigned char *) NULL) ThrowDCMException(ResourceLimitError, "MemoryAllocationFailed"); count=ReadBlob(image,(size_t) quantum*length,data); if (count != (ssize_t) (quantum*length)) { if (image_info->verbose != MagickFalse) (void) FormatLocaleFile(stdout,"count=%d quantum=%d " "length=%d group=%d\n",(int) count,(int) quantum,(int) length,(int) group); ThrowDCMException(CorruptImageError, "InsufficientImageDataInFile"); } data[length*quantum]='\0'; } if ((((unsigned int) group << 16) | element) == 0xFFFEE0DD) { if (data != (unsigned char *) NULL) data=(unsigned char *) RelinquishMagickMemory(data); continue; } switch (group) { case 0x0002: 
{ switch (element) { case 0x0010: { char transfer_syntax[MagickPathExtent]; /* Transfer Syntax. */ if ((datum == 0) && (explicit_retry == MagickFalse)) { explicit_retry=MagickTrue; (void) SeekBlob(image,(MagickOffsetType) 0,SEEK_SET); group=0; element=0; if (image_info->verbose != MagickFalse) (void) FormatLocaleFile(stdout, "Corrupted image - trying explicit format\n"); break; } *transfer_syntax='\0'; if (data != (unsigned char *) NULL) (void) CopyMagickString(transfer_syntax,(char *) data, MagickPathExtent); if (image_info->verbose != MagickFalse) (void) FormatLocaleFile(stdout,"transfer_syntax=%s\n", (const char *) transfer_syntax); if (strncmp(transfer_syntax,"1.2.840.10008.1.2",17) == 0) { int subtype, type; type=1; subtype=0; if (strlen(transfer_syntax) > 17) { count=(ssize_t) sscanf(transfer_syntax+17,".%d.%d",&type, &subtype); if (count < 1) ThrowDCMException(CorruptImageError, "ImproperImageHeader"); } switch (type) { case 1: { image->endian=LSBEndian; break; } case 2: { image->endian=MSBEndian; break; } case 4: { if ((subtype >= 80) && (subtype <= 81)) image->compression=JPEGCompression; else if ((subtype >= 90) && (subtype <= 93)) image->compression=JPEG2000Compression; else image->compression=JPEGCompression; break; } case 5: { image->compression=RLECompression; break; } } } break; } default: break; } break; } case 0x0028: { switch (element) { case 0x0002: { /* Samples per pixel. */ info.samples_per_pixel=(size_t) datum; if ((info.samples_per_pixel == 0) || (info.samples_per_pixel > 4)) ThrowDCMException(CorruptImageError,"ImproperImageHeader"); break; } case 0x0004: { /* Photometric interpretation. */ if (data == (unsigned char *) NULL) break; for (i=0; i < (ssize_t) MagickMin(length,MagickPathExtent-1); i++) photometric[i]=(char) data[i]; photometric[i]='\0'; info.polarity=LocaleCompare(photometric,"MONOCHROME1 ") == 0 ? MagickTrue : MagickFalse; break; } case 0x0006: { /* Planar configuration. 
*/ if (datum == 1) image->interlace=PlaneInterlace; break; } case 0x0008: { /* Number of frames. */ if (data == (unsigned char *) NULL) break; number_scenes=StringToUnsignedLong((char *) data); break; } case 0x0010: { /* Image rows. */ height=(size_t) datum; break; } case 0x0011: { /* Image columns. */ width=(size_t) datum; break; } case 0x0100: { /* Bits allocated. */ info.bits_allocated=(size_t) datum; info.bytes_per_pixel=1; if (datum > 8) info.bytes_per_pixel=2; info.depth=info.bits_allocated; if ((info.depth == 0) || (info.depth > 32)) ThrowDCMException(CorruptImageError,"ImproperImageHeader"); info.max_value=(1UL << info.bits_allocated)-1; image->depth=info.depth; break; } case 0x0101: { /* Bits stored. */ info.significant_bits=(size_t) datum; info.bytes_per_pixel=1; if (info.significant_bits > 8) info.bytes_per_pixel=2; info.depth=info.significant_bits; if ((info.depth == 0) || (info.depth > 16)) ThrowDCMException(CorruptImageError,"ImproperImageHeader"); info.max_value=(1UL << info.significant_bits)-1; info.mask=(size_t) GetQuantumRange(info.significant_bits); image->depth=info.depth; break; } case 0x0102: { /* High bit. */ break; } case 0x0103: { /* Pixel representation. */ info.signed_data=(size_t) datum; break; } case 0x1050: { /* Visible pixel range: center. */ if (data != (unsigned char *) NULL) info.window_center=StringToDouble((char *) data,(char **) NULL); break; } case 0x1051: { /* Visible pixel range: width. */ if (data != (unsigned char *) NULL) info.window_width=StringToDouble((char *) data,(char **) NULL); break; } case 0x1052: { /* Rescale intercept */ if (data != (unsigned char *) NULL) info.rescale_intercept=StringToDouble((char *) data, (char **) NULL); break; } case 0x1053: { /* Rescale slope */ if (data != (unsigned char *) NULL) info.rescale_slope=StringToDouble((char *) data,(char **) NULL); break; } case 0x1200: case 0x3006: { /* Populate graymap. 
*/ if (data == (unsigned char *) NULL) break; colors=(size_t) (length/info.bytes_per_pixel); datum=(int) colors; if (graymap != (int *) NULL) graymap=(int *) RelinquishMagickMemory(graymap); graymap=(int *) AcquireQuantumMemory(MagickMax(colors,65536), sizeof(*graymap)); if (graymap == (int *) NULL) ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(graymap,0,MagickMax(colors,65536)* sizeof(*graymap)); for (i=0; i < (ssize_t) colors; i++) if (info.bytes_per_pixel == 1) graymap[i]=(int) data[i]; else graymap[i]=(int) ((short *) data)[i]; break; } case 0x1201: { unsigned short index; /* Populate redmap. */ if (data == (unsigned char *) NULL) break; colors=(size_t) (length/2); datum=(int) colors; if (redmap != (int *) NULL) redmap=(int *) RelinquishMagickMemory(redmap); redmap=(int *) AcquireQuantumMemory(MagickMax(colors,65536), sizeof(*redmap)); if (redmap == (int *) NULL) ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(redmap,0,MagickMax(colors,65536)* sizeof(*redmap)); p=data; for (i=0; i < (ssize_t) colors; i++) { if (image->endian == MSBEndian) index=(unsigned short) ((*p << 8) | *(p+1)); else index=(unsigned short) (*p | (*(p+1) << 8)); redmap[i]=(int) index; p+=2; } break; } case 0x1202: { unsigned short index; /* Populate greenmap. 
*/ if (data == (unsigned char *) NULL) break; colors=(size_t) (length/2); datum=(int) colors; if (greenmap != (int *) NULL) greenmap=(int *) RelinquishMagickMemory(greenmap); greenmap=(int *) AcquireQuantumMemory(MagickMax(colors,65536), sizeof(*greenmap)); if (greenmap == (int *) NULL) ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(greenmap,0,MagickMax(colors,65536)* sizeof(*greenmap)); p=data; for (i=0; i < (ssize_t) colors; i++) { if (image->endian == MSBEndian) index=(unsigned short) ((*p << 8) | *(p+1)); else index=(unsigned short) (*p | (*(p+1) << 8)); greenmap[i]=(int) index; p+=2; } break; } case 0x1203: { unsigned short index; /* Populate bluemap. */ if (data == (unsigned char *) NULL) break; colors=(size_t) (length/2); datum=(int) colors; if (bluemap != (int *) NULL) bluemap=(int *) RelinquishMagickMemory(bluemap); bluemap=(int *) AcquireQuantumMemory(MagickMax(colors,65536), sizeof(*bluemap)); if (bluemap == (int *) NULL) ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(bluemap,0,MagickMax(colors,65536)* sizeof(*bluemap)); p=data; for (i=0; i < (ssize_t) colors; i++) { if (image->endian == MSBEndian) index=(unsigned short) ((*p << 8) | *(p+1)); else index=(unsigned short) (*p | (*(p+1) << 8)); bluemap[i]=(int) index; p+=2; } break; } default: break; } break; } case 0x2050: { switch (element) { case 0x0020: { if ((data != (unsigned char *) NULL) && (strncmp((char *) data,"INVERSE",7) == 0)) info.polarity=MagickTrue; break; } default: break; } break; } default: break; } if (data != (unsigned char *) NULL) { char *attribute; for (i=0; dicom_info[i].description != (char *) NULL; i++) if ((group == dicom_info[i].group) && (element == dicom_info[i].element)) break; if (dicom_info[i].description != (char *) NULL) { attribute=AcquireString("dcm:"); (void) ConcatenateString(&attribute,dicom_info[i].description); for (i=0; i < (ssize_t) MagickMax(length,4); i++) if (isprint((int) data[i]) == MagickFalse) 
break; if ((i == (ssize_t) length) || (length > 4)) { (void) SubstituteString(&attribute," ",""); (void) SetImageProperty(image,attribute,(char *) data, exception); } attribute=DestroyString(attribute); } } if (image_info->verbose != MagickFalse) { if (data == (unsigned char *) NULL) (void) FormatLocaleFile(stdout,"%d\n",datum); else { /* Display group data. */ for (i=0; i < (ssize_t) MagickMax(length,4); i++) if (isprint((int) data[i]) == MagickFalse) break; if ((i != (ssize_t) length) && (length <= 4)) { ssize_t j; datum=0; for (j=(ssize_t) length-1; j >= 0; j--) datum=(256*datum+data[j]); (void) FormatLocaleFile(stdout,"%d",datum); } else for (i=0; i < (ssize_t) length; i++) if (isprint((int) data[i]) != MagickFalse) (void) FormatLocaleFile(stdout,"%c",data[i]); else (void) FormatLocaleFile(stdout,"%c",'.'); (void) FormatLocaleFile(stdout,"\n"); } } if (data != (unsigned char *) NULL) data=(unsigned char *) RelinquishMagickMemory(data); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } } if ((group == 0xfffc) && (element == 0xfffc)) { Image *last; last=RemoveLastImageFromList(&image); if (last != (Image *) NULL) last=DestroyImage(last); break; } if ((width == 0) || (height == 0)) ThrowDCMException(CorruptImageError,"ImproperImageHeader"); image->columns=(size_t) width; image->rows=(size_t) height; if (info.signed_data == 0xffff) info.signed_data=(size_t) (info.significant_bits == 16 ? 1 : 0); if ((image->compression == JPEGCompression) || (image->compression == JPEG2000Compression)) { Image *images; ImageInfo *read_info; int c; /* Read offset table. 
*/ for (i=0; i < (ssize_t) stream_info->remaining; i++) if (ReadBlobByte(image) == EOF) break; (void) (((ssize_t) ReadBlobLSBShort(image) << 16) | ReadBlobLSBShort(image)); length=(size_t) ReadBlobLSBLong(image); if (length > (size_t) GetBlobSize(image)) ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile"); stream_info->offset_count=length >> 2; if (stream_info->offset_count != 0) { if (stream_info->offsets != (ssize_t *) NULL) stream_info->offsets=(ssize_t *) RelinquishMagickMemory( stream_info->offsets); stream_info->offsets=(ssize_t *) AcquireQuantumMemory( stream_info->offset_count,sizeof(*stream_info->offsets)); if (stream_info->offsets == (ssize_t *) NULL) ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) stream_info->offset_count; i++) stream_info->offsets[i]=(ssize_t) ReadBlobLSBSignedLong(image); offset=TellBlob(image); for (i=0; i < (ssize_t) stream_info->offset_count; i++) stream_info->offsets[i]+=offset; } /* Handle non-native image formats. 
*/ read_info=CloneImageInfo(image_info); SetImageInfoBlob(read_info,(void *) NULL,0); images=NewImageList(); for (scene=0; scene < (ssize_t) number_scenes; scene++) { char filename[MagickPathExtent]; const char *property; FILE *file; Image *jpeg_image; int unique_file; unsigned int tag; tag=((unsigned int) ReadBlobLSBShort(image) << 16) | ReadBlobLSBShort(image); length=(size_t) ReadBlobLSBLong(image); if (length > (size_t) GetBlobSize(image)) ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile"); if (tag == 0xFFFEE0DD) break; /* sequence delimiter tag */ if (tag != 0xFFFEE000) { read_info=DestroyImageInfo(read_info); ThrowDCMException(CorruptImageError,"ImproperImageHeader"); } file=(FILE *) NULL; unique_file=AcquireUniqueFileResource(filename); if (unique_file != -1) file=fdopen(unique_file,"wb"); if (file == (FILE *) NULL) { (void) RelinquishUniqueFileResource(filename); ThrowFileException(exception,FileOpenError, "UnableToCreateTemporaryFile",filename); break; } for (c=EOF; length != 0; length--) { c=ReadBlobByte(image); if (c == EOF) { ThrowFileException(exception,CorruptImageError, "UnexpectedEndOfFile",image->filename); break; } if (fputc(c,file) != c) break; } (void) fclose(file); if (c == EOF) break; (void) FormatLocaleString(read_info->filename,MagickPathExtent, "jpeg:%s",filename); if (image->compression == JPEG2000Compression) (void) FormatLocaleString(read_info->filename,MagickPathExtent, "j2k:%s",filename); jpeg_image=ReadImage(read_info,exception); if (jpeg_image != (Image *) NULL) { ResetImagePropertyIterator(image); property=GetNextImageProperty(image); while (property != (const char *) NULL) { (void) SetImageProperty(jpeg_image,property, GetImageProperty(image,property,exception),exception); property=GetNextImageProperty(image); } AppendImageToList(&images,jpeg_image); } (void) RelinquishUniqueFileResource(filename); } read_info=DestroyImageInfo(read_info); if (stream_info->offsets != (ssize_t *) NULL) stream_info->offsets=(ssize_t *) 
RelinquishMagickMemory(stream_info->offsets); stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info); if (info.scale != (Quantum *) NULL) info.scale=(Quantum *) RelinquishMagickMemory(info.scale); if (graymap != (int *) NULL) graymap=(int *) RelinquishMagickMemory(graymap); if (bluemap != (int *) NULL) bluemap=(int *) RelinquishMagickMemory(bluemap); if (greenmap != (int *) NULL) greenmap=(int *) RelinquishMagickMemory(greenmap); if (redmap != (int *) NULL) redmap=(int *) RelinquishMagickMemory(redmap); image=DestroyImageList(image); return(GetFirstImageInList(images)); } if (info.depth != (1UL*MAGICKCORE_QUANTUM_DEPTH)) { QuantumAny range; /* Compute pixel scaling table. */ length=(size_t) (GetQuantumRange(info.depth)+1); if (length > (size_t) GetBlobSize(image)) ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile"); if (info.scale != (Quantum *) NULL) info.scale=(Quantum *) RelinquishMagickMemory(info.scale); info.scale=(Quantum *) AcquireQuantumMemory(MagickMax(length,256), sizeof(*info.scale)); if (info.scale == (Quantum *) NULL) ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(info.scale,0,MagickMax(length,256)* sizeof(*info.scale)); range=GetQuantumRange(info.depth); for (i=0; i <= (ssize_t) GetQuantumRange(info.depth); i++) info.scale[i]=ScaleAnyToQuantum((size_t) i,range); } if (image->compression == RLECompression) { unsigned int tag; /* Read RLE offset table. 
*/ for (i=0; i < (ssize_t) stream_info->remaining; i++) { int c; c=ReadBlobByte(image); if (c == EOF) break; } tag=((unsigned int) ReadBlobLSBShort(image) << 16) | ReadBlobLSBShort(image); (void) tag; length=(size_t) ReadBlobLSBLong(image); if (length > (size_t) GetBlobSize(image)) ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile"); stream_info->offset_count=length >> 2; if (stream_info->offset_count != 0) { if (stream_info->offsets != (ssize_t *) NULL) stream_info->offsets=(ssize_t *) RelinquishMagickMemory(stream_info->offsets); stream_info->offsets=(ssize_t *) AcquireQuantumMemory( stream_info->offset_count,sizeof(*stream_info->offsets)); if (stream_info->offsets == (ssize_t *) NULL) ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) stream_info->offset_count; i++) { stream_info->offsets[i]=(ssize_t) ReadBlobLSBSignedLong(image); if (EOFBlob(image) != MagickFalse) break; } offset=TellBlob(image)+8; for (i=0; i < (ssize_t) stream_info->offset_count; i++) stream_info->offsets[i]+=offset; } } for (scene=0; scene < (ssize_t) number_scenes; scene++) { image->columns=(size_t) width; image->rows=(size_t) height; image->depth=info.depth; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) break; image->colorspace=RGBColorspace; (void) SetImageBackgroundColor(image,exception); if ((image->colormap == (PixelInfo *) NULL) && (info.samples_per_pixel == 1)) { int index; size_t one; one=1; if (colors == 0) colors=one << info.depth; if (AcquireImageColormap(image,colors,exception) == MagickFalse) ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed"); if (redmap != (int *) NULL) for (i=0; i < (ssize_t) colors; i++) { index=redmap[i]; if ((info.scale != (Quantum *) NULL) && (index >= 0) && (index <= (int) info.max_value)) index=(int) info.scale[index]; image->colormap[i].red=(MagickRealType) index; } if (greenmap != (int *) NULL) for (i=0; i < (ssize_t) colors; i++) { 
index=greenmap[i]; if ((info.scale != (Quantum *) NULL) && (index >= 0) && (index <= (int) info.max_value)) index=(int) info.scale[index]; image->colormap[i].green=(MagickRealType) index; } if (bluemap != (int *) NULL) for (i=0; i < (ssize_t) colors; i++) { index=bluemap[i]; if ((info.scale != (Quantum *) NULL) && (index >= 0) && (index <= (int) info.max_value)) index=(int) info.scale[index]; image->colormap[i].blue=(MagickRealType) index; } if (graymap != (int *) NULL) for (i=0; i < (ssize_t) colors; i++) { index=graymap[i]; if ((info.scale != (Quantum *) NULL) && (index >= 0) && (index <= (int) info.max_value)) index=(int) info.scale[index]; image->colormap[i].red=(MagickRealType) index; image->colormap[i].green=(MagickRealType) index; image->colormap[i].blue=(MagickRealType) index; } } if (image->compression == RLECompression) { unsigned int tag; /* Read RLE segment table. */ for (i=0; i < (ssize_t) stream_info->remaining; i++) { int c; c=ReadBlobByte(image); if (c == EOF) break; } tag=((unsigned int) ReadBlobLSBShort(image) << 16) | ReadBlobLSBShort(image); stream_info->remaining=(size_t) ReadBlobLSBLong(image); if ((tag != 0xFFFEE000) || (stream_info->remaining <= 64) || (EOFBlob(image) != MagickFalse)) { if (stream_info->offsets != (ssize_t *) NULL) stream_info->offsets=(ssize_t *) RelinquishMagickMemory(stream_info->offsets); ThrowDCMException(CorruptImageError,"ImproperImageHeader"); } stream_info->count=0; stream_info->segment_count=ReadBlobLSBLong(image); for (i=0; i < 15; i++) stream_info->segments[i]=(ssize_t) ReadBlobLSBSignedLong(image); stream_info->remaining-=64; if (stream_info->segment_count > 1) { info.bytes_per_pixel=1; info.depth=8; if (stream_info->offset_count > 0) (void) SeekBlob(image,(MagickOffsetType) stream_info->offsets[0]+stream_info->segments[0],SEEK_SET); } } if ((info.samples_per_pixel > 1) && (image->interlace == PlaneInterlace)) { register ssize_t x; register Quantum *q; ssize_t y; /* Convert Planar RGB DCM Medical image to pixel 
packets. */ for (i=0; i < (ssize_t) info.samples_per_pixel; i++) { for (y=0; y < (ssize_t) image->rows; y++) { q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { switch ((int) i) { case 0: { SetPixelRed(image,ScaleCharToQuantum((unsigned char) ReadDCMByte(stream_info,image)),q); break; } case 1: { SetPixelGreen(image,ScaleCharToQuantum((unsigned char) ReadDCMByte(stream_info,image)),q); break; } case 2: { SetPixelBlue(image,ScaleCharToQuantum((unsigned char) ReadDCMByte(stream_info,image)),q); break; } case 3: { SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) ReadDCMByte(stream_info,image)),q); break; } default: break; } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } } else { const char *option; /* Convert DCM Medical image to pixel packets. 
*/ option=GetImageOption(image_info,"dcm:display-range"); if (option != (const char *) NULL) { if (LocaleCompare(option,"reset") == 0) info.window_width=0; } option=GetImageOption(image_info,"dcm:window"); if (option != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; flags=ParseGeometry(option,&geometry_info); if (flags & RhoValue) info.window_center=geometry_info.rho; if (flags & SigmaValue) info.window_width=geometry_info.sigma; info.rescale=MagickTrue; } option=GetImageOption(image_info,"dcm:rescale"); if (option != (char *) NULL) info.rescale=IsStringTrue(option); if ((info.window_center != 0) && (info.window_width == 0)) info.window_width=info.window_center; status=ReadDCMPixels(image,&info,stream_info,MagickTrue,exception); if ((status != MagickFalse) && (stream_info->segment_count > 1)) { if (stream_info->offset_count > 0) (void) SeekBlob(image,(MagickOffsetType) stream_info->offsets[0]+stream_info->segments[1],SEEK_SET); (void) ReadDCMPixels(image,&info,stream_info,MagickFalse, exception); } } if (SetImageGray(image,exception) != MagickFalse) (void) SetImageColorspace(image,GRAYColorspace,exception); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; if (scene < (ssize_t) (number_scenes-1)) { /* Allocate next image structure. */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } if (TellBlob(image) < (MagickOffsetType) GetBlobSize(image)) { /* Allocate next image structure. 
      */
      AcquireNextImage(image_info,image,exception);
      if (GetNextImageInList(image) == (Image *) NULL)
        {
          status=MagickFalse;
          break;
        }
      image=SyncNextImageInList(image);
      status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
        GetBlobSize(image));
      if (status == MagickFalse)
        break;
    }
  }
  /*
    Free resources.  Every lookup table and the stream bookkeeping struct is
    released here on the normal exit path; the JPEG/JPEG-2000 delegate path
    earlier in the reader performs its own parallel cleanup before returning.
  */
  if (stream_info->offsets != (ssize_t *) NULL)
    stream_info->offsets=(ssize_t *) RelinquishMagickMemory(
      stream_info->offsets);
  stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info);
  if (info.scale != (Quantum *) NULL)
    info.scale=(Quantum *) RelinquishMagickMemory(info.scale);
  if (graymap != (int *) NULL)
    graymap=(int *) RelinquishMagickMemory(graymap);
  if (bluemap != (int *) NULL)
    bluemap=(int *) RelinquishMagickMemory(bluemap);
  if (greenmap != (int *) NULL)
    greenmap=(int *) RelinquishMagickMemory(greenmap);
  if (redmap != (int *) NULL)
    redmap=(int *) RelinquishMagickMemory(redmap);
  if (image == (Image *) NULL)
    return(image);
  (void) CloseBlob(image);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e g i s t e r D C M I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterDCMImage() adds attributes for the DCM image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterDCMImage method is:
%
%      size_t RegisterDCMImage(void)
%
*/
ModuleExport size_t RegisterDCMImage(void)
{
  MagickInfo
    *entry;

  static const char
    *DCMNote=
    {
      "DICOM is used by the medical community for images like X-rays. The\n"
      "specification, \"Digital Imaging and Communications in Medicine\n"
      "(DICOM)\", is available at http://medical.nema.org/. In particular,\n"
      "see part 5 which describes the image encoding (RLE, JPEG, JPEG-LS),\n"
      "and supplement 61 which adds JPEG-2000 encoding."
    };

  entry=AcquireMagickInfo("DCM","DCM",
    "Digital Imaging and Communications in Medicine image");
  entry->decoder=(DecodeImageHandler *) ReadDCMImage;
  entry->magick=(IsImageFormatHandler *) IsDCM;
  /* clearing CoderAdjoinFlag: a DCM blob is read-only multi-frame, not
     written as an adjoined sequence */
  entry->flags^=CoderAdjoinFlag;
  /* the reader seeks (offset tables, explicit-format retry), so require a
     seekable stream */
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->note=ConstantString(DCMNote);
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   U n r e g i s t e r D C M I m a g e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterDCMImage() removes format registrations made by the
%  DCM module from the list of supported formats.
%
%  The format of the UnregisterDCMImage method is:
%
%      UnregisterDCMImage(void)
%
*/
ModuleExport void UnregisterDCMImage(void)
{
  (void) UnregisterMagickInfo("DCM");
}
./CrossVul/dataset_final_sorted/CWE-20/c/good_365_0
crossvul-cpp_data_bad_3290_0
/* * Central processing for nfsd. * * Authors: Olaf Kirch (okir@monad.swb.de) * * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de> */ #include <linux/sched/signal.h> #include <linux/freezer.h> #include <linux/module.h> #include <linux/fs_struct.h> #include <linux/swap.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/lockd/bind.h> #include <linux/nfsacl.h> #include <linux/seq_file.h> #include <linux/inetdevice.h> #include <net/addrconf.h> #include <net/ipv6.h> #include <net/net_namespace.h> #include "nfsd.h" #include "cache.h" #include "vfs.h" #include "netns.h" #define NFSDDBG_FACILITY NFSDDBG_SVC extern struct svc_program nfsd_program; static int nfsd(void *vrqstp); /* * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and the members * of the svc_serv struct. In particular, ->sv_nrthreads but also to some * extent ->sv_temp_socks and ->sv_permsocks. It also protects nfsdstats.th_cnt * * If (out side the lock) nn->nfsd_serv is non-NULL, then it must point to a * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number * of nfsd threads must exist and each must listed in ->sp_all_threads in each * entry of ->sv_pools[]. * * Transitions of the thread count between zero and non-zero are of particular * interest since the svc_serv needs to be created and initialized at that * point, or freed. * * Finally, the nfsd_mutex also protects some of the global variables that are * accessed when nfsd starts and that are settable via the write_* routines in * nfsctl.c. In particular: * * user_recovery_dirname * user_lease_time * nfsd_versions */ DEFINE_MUTEX(nfsd_mutex); /* * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used. * nfsd_drc_max_pages limits the total amount of memory available for * version 4.1 DRC caches. * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage. 
 */
spinlock_t	nfsd_drc_lock;
unsigned long	nfsd_drc_max_mem;
unsigned long	nfsd_drc_mem_used;

#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat	nfsd_acl_svcstats;
static struct svc_version *	nfsd_acl_version[] = {
	[2] = &nfsd_acl_version2,
	[3] = &nfsd_acl_version3,
};

#define NFSD_ACL_MINVERS 2
#define NFSD_ACL_NRVERS	ARRAY_SIZE(nfsd_acl_version)
static struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];

static struct svc_program	nfsd_acl_program = {
	.pg_prog		= NFS_ACL_PROGRAM,
	.pg_nvers		= NFSD_ACL_NRVERS,
	.pg_vers		= nfsd_acl_versions,
	.pg_name		= "nfsacl",
	.pg_class		= "nfsd",
	.pg_stats		= &nfsd_acl_svcstats,
	.pg_authenticate	= &svc_set_client,
};

static struct svc_stat	nfsd_acl_svcstats = {
	.program	= &nfsd_acl_program,
};
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */

/* Static table of all compiled-in NFS versions; nfsd_versions below is the
 * runtime-enabled subset, toggled through nfsd_vers(). */
static struct svc_version *	nfsd_version[] = {
	[2] = &nfsd_version2,
#if defined(CONFIG_NFSD_V3)
	[3] = &nfsd_version3,
#endif
#if defined(CONFIG_NFSD_V4)
	[4] = &nfsd_version4,
#endif
};

#define NFSD_MINVERS 2
#define NFSD_NRVERS	ARRAY_SIZE(nfsd_version)
static struct svc_version *nfsd_versions[NFSD_NRVERS];

struct svc_program		nfsd_program = {
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
	.pg_next		= &nfsd_acl_program,
#endif
	.pg_prog		= NFS_PROGRAM,		/* program number */
	.pg_nvers		= NFSD_NRVERS,		/* nr of entries in nfsd_version */
	.pg_vers		= nfsd_versions,	/* version table */
	.pg_name		= "nfsd",		/* program name */
	.pg_class		= "nfsd",		/* authentication class */
	.pg_stats		= &nfsd_svcstats,	/* version table */
	.pg_authenticate	= &svc_set_client,	/* export authentication */
};

/* Which NFSv4 minor versions are currently enabled; protected by
 * nfsd_mutex like the rest of the version state. */
static bool nfsd_supported_minorversions[NFSD_SUPPORTED_MINOR_VERSION + 1] = {
	[0] = 1,
	[1] = 1,
	[2] = 1,
};

/*
 * Enable, disable, or query one NFS protocol version.  Returns 0 for
 * out-of-range versions; for NFSD_TEST/NFSD_AVAIL returns a boolean.
 */
int nfsd_vers(int vers, enum vers_op change)
{
	if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS)
		return 0;
	switch(change) {
	case NFSD_SET:
		nfsd_versions[vers] = nfsd_version[vers];
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		/* keep the ACL sideband program in step with the main one */
		if (vers < NFSD_ACL_NRVERS)
			nfsd_acl_versions[vers] = nfsd_acl_version[vers];
#endif
		break;
	case NFSD_CLEAR:
		nfsd_versions[vers] = NULL;
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
		if (vers < NFSD_ACL_NRVERS)
			nfsd_acl_versions[vers] = NULL;
#endif
		break;
	case NFSD_TEST:
		return nfsd_versions[vers] != NULL;
	case NFSD_AVAIL:
		return nfsd_version[vers] != NULL;
	}
	return 0;
}

/* Disable NFSv4 entirely once the last 4.x minor version is cleared. */
static void
nfsd_adjust_nfsd_versions4(void)
{
	unsigned i;

	for (i = 0; i <= NFSD_SUPPORTED_MINOR_VERSION; i++) {
		if (nfsd_supported_minorversions[i])
			return;
	}
	nfsd_vers(4, NFSD_CLEAR);
}

/*
 * Enable/disable/query one NFSv4 minor version.  Returns -1 for an
 * unsupported minor version (except for the NFSD_AVAIL query).
 */
int nfsd_minorversion(u32 minorversion, enum vers_op change)
{
	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
	    change != NFSD_AVAIL)
		return -1;
	switch(change) {
	case NFSD_SET:
		nfsd_supported_minorversions[minorversion] = true;
		nfsd_vers(4, NFSD_SET);
		break;
	case NFSD_CLEAR:
		nfsd_supported_minorversions[minorversion] = false;
		nfsd_adjust_nfsd_versions4();
		break;
	case NFSD_TEST:
		return nfsd_supported_minorversions[minorversion];
	case NFSD_AVAIL:
		return minorversion <= NFSD_SUPPORTED_MINOR_VERSION;
	}
	return 0;
}

/*
 * Maximum number of nfsd processes
 */
#define	NFSD_MAXSERVS		8192

/* Current thread count for this net namespace, read under nfsd_mutex. */
int nfsd_nrthreads(struct net *net)
{
	int rv = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	if (nn->nfsd_serv)
		rv = nn->nfsd_serv->sv_nrthreads;
	mutex_unlock(&nfsd_mutex);
	return rv;
}

/*
 * Create the default UDP and TCP listeners on NFS_PORT, unless some
 * permanent socket already exists (e.g. set up via write_ports).
 */
static int nfsd_init_socks(struct net *net)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!list_empty(&nn->nfsd_serv->sv_permsocks))
		return 0;

	error = svc_create_xprt(nn->nfsd_serv, "udp", net, PF_INET, NFS_PORT,
					SVC_SOCK_DEFAULTS);
	if (error < 0)
		return error;

	error = svc_create_xprt(nn->nfsd_serv, "tcp", net, PF_INET, NFS_PORT,
					SVC_SOCK_DEFAULTS);
	if (error < 0)
		return error;

	return 0;
}

/* Number of network namespaces using the generic (non-per-net) state;
 * manipulated only under nfsd_mutex. */
static int nfsd_users = 0;

/*
 * Bring up state shared by all net namespaces (readahead cache, NFSv4
 * state) the first time any namespace starts nfsd.  On failure the user
 * count is rolled back via goto cleanup.
 */
static int nfsd_startup_generic(int nrservs)
{
	int ret;

	if (nfsd_users++)
		return 0;

	/*
	 * Readahead param cache - will no-op if it already exists.
	 * (Note therefore results will be suboptimal if number of
	 * threads is modified after nfsd start.)
	 */
	ret = nfsd_racache_init(2*nrservs);
	if (ret)
		goto dec_users;

	ret = nfs4_state_start();
	if (ret)
		goto out_racache;
	return 0;

out_racache:
	nfsd_racache_shutdown();
dec_users:
	nfsd_users--;
	return ret;
}

/* Tear down the shared state when the last user goes away. */
static void nfsd_shutdown_generic(void)
{
	if (--nfsd_users)
		return;

	nfs4_state_shutdown();
	nfsd_racache_shutdown();
}

/* lockd is needed only for v2/v3; v4 does its own locking. */
static bool nfsd_needs_lockd(void)
{
#if defined(CONFIG_NFSD_V3)
	return (nfsd_versions[2] != NULL) || (nfsd_versions[3] != NULL);
#else
	return (nfsd_versions[2] != NULL);
#endif
}

/*
 * Per-net-namespace startup: generic state, listener sockets, lockd (if
 * required by the enabled versions) and NFSv4 per-net state.  Unwinds in
 * reverse order on failure.
 */
static int nfsd_startup_net(int nrservs, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	if (nn->nfsd_net_up)
		return 0;

	ret = nfsd_startup_generic(nrservs);
	if (ret)
		return ret;
	ret = nfsd_init_socks(net);
	if (ret)
		goto out_socks;

	if (nfsd_needs_lockd() && !nn->lockd_up) {
		ret = lockd_up(net);
		if (ret)
			goto out_socks;
		nn->lockd_up = 1;
	}

	ret = nfs4_state_start_net(net);
	if (ret)
		goto out_lockd;

	nn->nfsd_net_up = true;
	return 0;

out_lockd:
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = 0;
	}
out_socks:
	nfsd_shutdown_generic();
	return ret;
}

/* Per-net shutdown: inverse of nfsd_startup_net(). */
static void nfsd_shutdown_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	nfs4_state_shutdown_net(net);
	if (nn->lockd_up) {
		lockd_down(net);
		nn->lockd_up = 0;
	}
	nn->nfsd_net_up = false;
	nfsd_shutdown_generic();
}

/*
 * On NETDEV_DOWN, age out temporary transports bound to the address that
 * just disappeared so clients reconnect promptly elsewhere.
 */
static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
	void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in sin;

	if (event != NETDEV_DOWN)
		goto out;

	if (nn->nfsd_serv) {
		dprintk("nfsd_inetaddr_event: removed %pI4\n", &ifa->ifa_local);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = ifa->ifa_local;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
	}

out:
	return NOTIFY_DONE;
}
static struct notifier_block nfsd_inetaddr_notifier = {
	.notifier_call = nfsd_inetaddr_event,
};

#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of nfsd_inetaddr_event(). */
static int nfsd_inet6addr_event(struct notifier_block *this,
	unsigned long event, void *ptr)
{
	struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = ifa->idev->dev;
	struct net *net = dev_net(dev);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct sockaddr_in6 sin6;

	if (event != NETDEV_DOWN)
		goto out;

	if (nn->nfsd_serv) {
		dprintk("nfsd_inet6addr_event: removed %pI6\n", &ifa->addr);
		sin6.sin6_family = AF_INET6;
		sin6.sin6_addr = ifa->addr;
		/* link-local addresses need the interface scope to match */
		if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
			sin6.sin6_scope_id = ifa->idev->dev->ifindex;
		svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block nfsd_inet6addr_notifier = {
	.notifier_call = nfsd_inet6addr_event,
};
#endif

/* Only used under nfsd_mutex, so this atomic may be overkill: */
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);

/*
 * svc_serv svo_shutdown callback: runs when the last nfsd thread exits.
 * Unregisters the address notifiers (once globally), cleans up rpcbind
 * registrations, and tears down per-net state if it was ever brought up.
 */
static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* check if the notifier still has clients */
	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
		unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}

	/*
	 * write_ports can create the server without actually starting
	 * any threads--if we get shut down before any threads are
	 * started, then nfsd_last_thread will be run before any of this
	 * other initialization has been done except the rpcb information.
	 */
	svc_rpcb_cleanup(serv, net);
	if (!nn->nfsd_net_up)
		return;

	nfsd_shutdown_net(net);
	printk(KERN_WARNING "nfsd: last server has exited, flushing export "
			    "cache\n");
	nfsd_export_flush(net);
}

/*
 * If no version is currently enabled, enable every available one
 * (including all supported v4 minor versions); otherwise do nothing.
 */
void nfsd_reset_versions(void)
{
	int i;

	for (i = 0; i < NFSD_NRVERS; i++)
		if (nfsd_vers(i, NFSD_TEST))
			return;

	for (i = 0; i < NFSD_NRVERS; i++)
		if (i != 4)
			nfsd_vers(i, NFSD_SET);
		else {
			int minor = 0;
			while (nfsd_minorversion(minor, NFSD_SET) >= 0)
				minor++;
		}
}

/*
 * Each session guarantees a negotiated per slot memory cache for replies
 * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated
 * NFSv4.1 server might want to use more memory for a DRC than a machine
 * with mutiple services.
 *
 * Impose a hard limit on the number of pages for the DRC which varies
 * according to the machines free pages. This is of course only a default.
 *
 * For now this is a #defined shift which could be under admin control
 * in the future.
 */
static void set_max_drc(void)
{
	#define NFSD_DRC_SIZE_SHIFT	10
	nfsd_drc_max_mem = (nr_free_buffer_pages()
					>> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE;
	nfsd_drc_mem_used = 0;
	spin_lock_init(&nfsd_drc_lock);
	dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem);
}

/*
 * Pick a default max request block size scaled to low memory: halve
 * NFSSVC_MAXBLKSIZE until it fits the 1/4096-of-memory target, bottoming
 * out at 16K (8*1024*2).
 */
static int nfsd_get_default_max_blksize(void)
{
	struct sysinfo i;
	unsigned long long target;
	unsigned long ret;

	si_meminfo(&i);
	target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
	/*
	 * Aim for 1/4096 of memory per thread This gives 1MB on 4Gig
	 * machines, but only uses 32K on 128M machines. Bottom out at
	 * 8K on 32M and smaller. Of course, this is only a default.
	 */
	target >>= 12;

	ret = NFSSVC_MAXBLKSIZE;
	while (ret > target && ret >= 8*1024*2)
		ret /= 2;
	return ret;
}

static struct svc_serv_ops nfsd_thread_sv_ops = {
	.svo_shutdown		= nfsd_last_thread,
	.svo_function		= nfsd,
	.svo_enqueue_xprt	= svc_xprt_do_enqueue,
	.svo_setup		= svc_set_num_threads,
	.svo_module		= THIS_MODULE,
};

/*
 * Create (or take a reference on) the per-net svc_serv.  Caller must hold
 * nfsd_mutex.  Also registers the address notifiers on first creation and
 * records the boot time used for verifier generation.
 */
int nfsd_create_serv(struct net *net)
{
	int error;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	WARN_ON(!mutex_is_locked(&nfsd_mutex));
	if (nn->nfsd_serv) {
		svc_get(nn->nfsd_serv);
		return 0;
	}
	if (nfsd_max_blksize == 0)
		nfsd_max_blksize = nfsd_get_default_max_blksize();
	nfsd_reset_versions();
	nn->nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
						&nfsd_thread_sv_ops);
	if (nn->nfsd_serv == NULL)
		return -ENOMEM;

	nn->nfsd_serv->sv_maxconn = nn->max_connections;
	error = svc_bind(nn->nfsd_serv, net);
	if (error < 0) {
		svc_destroy(nn->nfsd_serv);
		return error;
	}

	set_max_drc();
	/* check if the notifier is already set */
	if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
		register_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}
	do_gettimeofday(&nn->nfssvc_boot);		/* record boot time */
	return 0;
}

/* Number of thread pools in this namespace's server, 0 if not running. */
int nfsd_nrpools(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv == NULL)
		return 0;
	else
		return nn->nfsd_serv->sv_nrpools;
}

/* Fill nthreads[] with per-pool thread counts, up to n entries. */
int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (nn->nfsd_serv != NULL) {
		for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
			nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
	}

	return 0;
}

/*
 * Drop one reference on the server; if this caller holds the last
 * reference (sv_nrthreads == 1), shut down the transports first and
 * clear nn->nfsd_serv afterwards.
 */
void nfsd_destroy(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int destroy = (nn->nfsd_serv->sv_nrthreads == 1);

	if (destroy)
		svc_shutdown_net(nn->nfsd_serv, net);
	svc_destroy(nn->nfsd_serv);
	if (destroy)
		nn->nfsd_serv = NULL;
}

/*
 * Set per-pool thread counts.  Requested numbers are clamped and, if
 * their total exceeds NFSD_MAXSERVS, scaled down proportionally; pool 0
 * always keeps at least one thread.  Caller must hold nfsd_mutex.
 */
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	int tot = 0;
	int err = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	WARN_ON(!mutex_is_locked(&nfsd_mutex));

	if (nn->nfsd_serv == NULL || n <= 0)
		return 0;

	if (n > nn->nfsd_serv->sv_nrpools)
		n = nn->nfsd_serv->sv_nrpools;

	/* enforce a global maximum number of threads */
	tot = 0;
	for (i = 0; i < n; i++) {
		nthreads[i] = min(nthreads[i], NFSD_MAXSERVS);
		tot += nthreads[i];
	}
	if (tot > NFSD_MAXSERVS) {
		/* total too large: scale down requested numbers */
		for (i = 0; i < n && tot > 0; i++) {
			int new = nthreads[i] * NFSD_MAXSERVS / tot;
			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
		for (i = 0; i < n && tot > 0; i++) {
			nthreads[i]--;
			tot--;
		}
	}

	/*
	 * There must always be a thread in pool 0; the admin
	 * can't shut down NFS completely using pool_threads.
	 */
	if (nthreads[0] == 0)
		nthreads[0] = 1;

	/* apply the new numbers */
	svc_get(nn->nfsd_serv);
	for (i = 0; i < n; i++) {
		err = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
				&nn->nfsd_serv->sv_pools[i], nthreads[i]);
		if (err)
			break;
	}
	nfsd_destroy(net);
	return err;
}

/*
 * Adjust the number of threads and return the new number of threads.
 * This is also the function that starts the server if necessary, if
 * this is the first time nrservs is nonzero.
 */
int nfsd_svc(int nrservs, struct net *net)
{
	int	error;
	bool	nfsd_up_before;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	mutex_lock(&nfsd_mutex);
	dprintk("nfsd: creating service\n");

	nrservs = max(nrservs, 0);
	nrservs = min(nrservs, NFSD_MAXSERVS);
	error = 0;

	if (nrservs == 0 && nn->nfsd_serv == NULL)
		goto out;

	error = nfsd_create_serv(net);
	if (error)
		goto out;

	nfsd_up_before = nn->nfsd_net_up;

	error = nfsd_startup_net(nrservs, net);
	if (error)
		goto out_destroy;
	error = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
			NULL, nrservs);
	if (error)
		goto out_shutdown;
	/* We are holding a reference to nn->nfsd_serv which
	 * we don't want to count in the return value,
	 * so subtract 1
	 */
	error = nn->nfsd_serv->sv_nrthreads - 1;
out_shutdown:
	/* only undo the per-net startup we ourselves performed */
	if (error < 0 && !nfsd_up_before)
		nfsd_shutdown_net(net);
out_destroy:
	nfsd_destroy(net);		/* Release server */
out:
	mutex_unlock(&nfsd_mutex);
	return error;
}

/*
 * This is the NFS server kernel thread
 */
static int
nfsd(void *vrqstp)
{
	struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
	struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
	struct net *net = perm_sock->xpt_net;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int err;

	/* Lock module and set up kernel thread */
	mutex_lock(&nfsd_mutex);

	/* At this point, the thread shares current->fs
	 * with the init process. We need to create files with the
	 * umask as defined by the client instead of init's umask.
*/ if (unshare_fs_struct() < 0) { printk("Unable to start nfsd thread: out of memory\n"); goto out; } current->fs->umask = 0; /* * thread is spawned with all signals set to SIG_IGN, re-enable * the ones that will bring down the thread */ allow_signal(SIGKILL); allow_signal(SIGHUP); allow_signal(SIGINT); allow_signal(SIGQUIT); nfsdstats.th_cnt++; mutex_unlock(&nfsd_mutex); set_freezable(); /* * The main request loop */ for (;;) { /* Update sv_maxconn if it has changed */ rqstp->rq_server->sv_maxconn = nn->max_connections; /* * Find a socket with data available and call its * recvfrom routine. */ while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN) ; if (err == -EINTR) break; validate_process_creds(); svc_process(rqstp); validate_process_creds(); } /* Clear signals before calling svc_exit_thread() */ flush_signals(current); mutex_lock(&nfsd_mutex); nfsdstats.th_cnt --; out: rqstp->rq_server = NULL; /* Release the thread */ svc_exit_thread(rqstp); nfsd_destroy(net); /* Release module */ mutex_unlock(&nfsd_mutex); module_put_and_exit(0); return 0; } static __be32 map_new_errors(u32 vers, __be32 nfserr) { if (nfserr == nfserr_jukebox && vers == 2) return nfserr_dropit; if (nfserr == nfserr_wrongsec && vers < 4) return nfserr_acces; return nfserr; } int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) { struct svc_procedure *proc; kxdrproc_t xdr; __be32 nfserr; __be32 *nfserrp; dprintk("nfsd_dispatch: vers %d proc %d\n", rqstp->rq_vers, rqstp->rq_proc); proc = rqstp->rq_procinfo; /* * Give the xdr decoder a chance to change this if it wants * (necessary in the NFSv4.0 compound case) */ rqstp->rq_cachetype = proc->pc_cachetype; /* Decode arguments */ xdr = proc->pc_decode; if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base, rqstp->rq_argp)) { dprintk("nfsd: failed to decode arguments!\n"); *statp = rpc_garbage_args; return 1; } /* Check whether we have this call in the cache. 
*/ switch (nfsd_cache_lookup(rqstp)) { case RC_DROPIT: return 0; case RC_REPLY: return 1; case RC_DOIT:; /* do it */ } /* need to grab the location to store the status, as * nfsv4 does some encoding while processing */ nfserrp = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len; rqstp->rq_res.head[0].iov_len += sizeof(__be32); /* Now call the procedure handler, and encode NFS status. */ nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); nfserr = map_new_errors(rqstp->rq_vers, nfserr); if (nfserr == nfserr_dropit || test_bit(RQ_DROPME, &rqstp->rq_flags)) { dprintk("nfsd: Dropping request; may be revisited later\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); return 0; } if (rqstp->rq_proc != 0) *nfserrp++ = nfserr; /* Encode result. * For NFSv2, additional info is never returned in case of an error. */ if (!(nfserr && rqstp->rq_vers == 2)) { xdr = proc->pc_encode; if (xdr && !xdr(rqstp, nfserrp, rqstp->rq_resp)) { /* Failed to encode result. Release cache entry */ dprintk("nfsd: failed to encode result!\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); *statp = rpc_system_err; return 1; } } /* Store reply in cache. */ nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1); return 1; } int nfsd_pool_stats_open(struct inode *inode, struct file *file) { int ret; struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id); mutex_lock(&nfsd_mutex); if (nn->nfsd_serv == NULL) { mutex_unlock(&nfsd_mutex); return -ENODEV; } /* bump up the psudo refcount while traversing */ svc_get(nn->nfsd_serv); ret = svc_pool_stats_open(nn->nfsd_serv, file); mutex_unlock(&nfsd_mutex); return ret; } int nfsd_pool_stats_release(struct inode *inode, struct file *file) { int ret = seq_release(inode, file); struct net *net = inode->i_sb->s_fs_info; mutex_lock(&nfsd_mutex); /* this function really, really should have been called svc_put() */ nfsd_destroy(net); mutex_unlock(&nfsd_mutex); return ret; }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_3290_0
crossvul-cpp_data_bad_3477_0
/*
 * security/tomoyo/mount.c
 *
 * Copyright (C) 2005-2010  NTT DATA CORPORATION
 */

#include <linux/slab.h>
#include "common.h"

/* Keywords for mount restrictions. */

/* Allow to call 'mount --bind /source_dir /dest_dir' */
#define TOMOYO_MOUNT_BIND_KEYWORD                        "--bind"
/* Allow to call 'mount --move /old_dir /new_dir ' */
#define TOMOYO_MOUNT_MOVE_KEYWORD                        "--move"
/* Allow to call 'mount -o remount /dir ' */
#define TOMOYO_MOUNT_REMOUNT_KEYWORD                     "--remount"
/* Allow to call 'mount --make-unbindable /dir' */
#define TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD             "--make-unbindable"
/* Allow to call 'mount --make-private /dir' */
#define TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD                "--make-private"
/* Allow to call 'mount --make-slave /dir' */
#define TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD                  "--make-slave"
/* Allow to call 'mount --make-shared /dir' */
#define TOMOYO_MOUNT_MAKE_SHARED_KEYWORD                 "--make-shared"

/**
 * tomoyo_audit_mount_log - Audit mount log.
 *
 * @r: Pointer to "struct tomoyo_request_info".
 *
 * Returns 0 on success, negative value otherwise.
 */
static int tomoyo_audit_mount_log(struct tomoyo_request_info *r)
{
	const char *dev = r->param.mount.dev->name;
	const char *dir = r->param.mount.dir->name;
	const char *type = r->param.mount.type->name;
	const unsigned long flags = r->param.mount.flags;

	if (r->granted)
		return 0;
	/* Log in a form matching the pseudo mount operation that was used. */
	if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD))
		tomoyo_warn_log(r, "mount -o remount %s 0x%lX", dir, flags);
	else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD)
		 || !strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD))
		tomoyo_warn_log(r, "mount %s %s %s 0x%lX", type, dev, dir,
				flags);
	else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) ||
		 !strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) ||
		 !strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) ||
		 !strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD))
		tomoyo_warn_log(r, "mount %s %s 0x%lX", type, dir, flags);
	else
		tomoyo_warn_log(r, "mount -t %s %s %s 0x%lX", type, dev, dir,
				flags);
	return tomoyo_supervisor(r,
				 TOMOYO_KEYWORD_ALLOW_MOUNT "%s %s %s 0x%lX\n",
				 tomoyo_pattern(r->param.mount.dev),
				 tomoyo_pattern(r->param.mount.dir), type,
				 flags);
}

/**
 * tomoyo_check_mount_acl - Check if the request matches one mount ACL entry.
 *
 * @r:   Pointer to "struct tomoyo_request_info".
 * @ptr: Pointer to "struct tomoyo_acl_info" to compare against.
 *
 * Returns true if flags, fs type, mount point and (when required) device
 * name all match the ACL entry, false otherwise.
 */
static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r,
				   const struct tomoyo_acl_info *ptr)
{
	const struct tomoyo_mount_acl *acl =
		container_of(ptr, typeof(*acl), head);
	return tomoyo_compare_number_union(r->param.mount.flags, &acl->flags) &&
		tomoyo_compare_name_union(r->param.mount.type, &acl->fs_type) &&
		tomoyo_compare_name_union(r->param.mount.dir, &acl->dir_name) &&
		(!r->param.mount.need_dev ||
		 tomoyo_compare_name_union(r->param.mount.dev, &acl->dev_name));
}

/**
 * tomoyo_mount_acl - Check permission for mount() operation.
 *
 * @r:        Pointer to "struct tomoyo_request_info".
 * @dev_name: Name of device file. May be NULL.
 * @dir:      Pointer to "struct path".
 * @type:     Name of filesystem type.
 * @flags:    Mount options.
 *
 * Returns 0 on success, negative value otherwise.
 *
 * Caller holds tomoyo_read_lock().
 */
static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
			    struct path *dir, char *type, unsigned long flags)
{
	struct path path;
	struct file_system_type *fstype = NULL;
	const char *requested_type = NULL;
	const char *requested_dir_name = NULL;
	const char *requested_dev_name = NULL;
	struct tomoyo_path_info rtype;
	struct tomoyo_path_info rdev;
	struct tomoyo_path_info rdir;
	int need_dev = 0;
	int error = -ENOMEM;

	/* Get fstype. */
	requested_type = tomoyo_encode(type);
	if (!requested_type)
		goto out;
	rtype.name = requested_type;
	tomoyo_fill_path_info(&rtype);

	/* Get mount point. */
	requested_dir_name = tomoyo_realpath_from_path(dir);
	if (!requested_dir_name) {
		error = -ENOMEM;
		goto out;
	}
	rdir.name = requested_dir_name;
	tomoyo_fill_path_info(&rdir);

	/* Compare fs name. */
	if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD)) {
		/* dev_name is ignored. */
	} else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) ||
		   !strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) ||
		   !strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) ||
		   !strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD)) {
		/* dev_name is ignored. */
	} else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD) ||
		   !strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD)) {
		need_dev = -1; /* dev_name is a directory */
	} else {
		fstype = get_fs_type(type);
		if (!fstype) {
			error = -ENODEV;
			goto out;
		}
		if (fstype->fs_flags & FS_REQUIRES_DEV)
			/* dev_name is a block device file. */
			need_dev = 1;
	}
	if (need_dev) {
		/*
		 * Get mount point or device file.
		 *
		 * A NULL dev_name must be rejected here: passing it to
		 * kern_path() would dereference a NULL pointer
		 * (CVE-2011-2518).
		 */
		if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
			error = -ENOENT;
			goto out;
		}
		requested_dev_name = tomoyo_realpath_from_path(&path);
		path_put(&path);
		if (!requested_dev_name) {
			error = -ENOENT;
			goto out;
		}
	} else {
		/* Map dev_name to "<NULL>" if no dev_name given. */
		if (!dev_name)
			dev_name = "<NULL>";
		requested_dev_name = tomoyo_encode(dev_name);
		if (!requested_dev_name) {
			error = -ENOMEM;
			goto out;
		}
	}
	rdev.name = requested_dev_name;
	tomoyo_fill_path_info(&rdev);
	r->param_type = TOMOYO_TYPE_MOUNT_ACL;
	r->param.mount.need_dev = need_dev;
	r->param.mount.dev = &rdev;
	r->param.mount.dir = &rdir;
	r->param.mount.type = &rtype;
	r->param.mount.flags = flags;
	do {
		tomoyo_check_acl(r, tomoyo_check_mount_acl);
		error = tomoyo_audit_mount_log(r);
	} while (error == TOMOYO_RETRY_REQUEST);
 out:
	kfree(requested_dev_name);
	kfree(requested_dir_name);
	if (fstype)
		put_filesystem(fstype);
	kfree(requested_type);
	return error;
}

/**
 * tomoyo_mount_permission - Check permission for mount() operation.
 *
 * @dev_name:  Name of device file. May be NULL.
 * @path:      Pointer to "struct path".
 * @type:      Name of filesystem type. May be NULL.
 * @flags:     Mount options.
 * @data_page: Optional data. May be NULL.
 *
 * Returns 0 on success, negative value otherwise.
 */
int tomoyo_mount_permission(char *dev_name, struct path *path, char *type,
			    unsigned long flags, void *data_page)
{
	struct tomoyo_request_info r;
	int error;
	int idx;

	if (tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_MOUNT)
	    == TOMOYO_CONFIG_DISABLED)
		return 0;
	/* Strip the legacy magic number, if present. */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;
	/*
	 * Map operation-selecting mount flags to pseudo filesystem type
	 * keywords so each pseudo operation is checked as its own "type".
	 */
	if (flags & MS_REMOUNT) {
		type = TOMOYO_MOUNT_REMOUNT_KEYWORD;
		flags &= ~MS_REMOUNT;
	}
	if (flags & MS_MOVE) {
		type = TOMOYO_MOUNT_MOVE_KEYWORD;
		flags &= ~MS_MOVE;
	}
	if (flags & MS_BIND) {
		type = TOMOYO_MOUNT_BIND_KEYWORD;
		flags &= ~MS_BIND;
	}
	if (flags & MS_UNBINDABLE) {
		type = TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD;
		flags &= ~MS_UNBINDABLE;
	}
	if (flags & MS_PRIVATE) {
		type = TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD;
		flags &= ~MS_PRIVATE;
	}
	if (flags & MS_SLAVE) {
		type = TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD;
		flags &= ~MS_SLAVE;
	}
	if (flags & MS_SHARED) {
		type = TOMOYO_MOUNT_MAKE_SHARED_KEYWORD;
		flags &= ~MS_SHARED;
	}
	if (!type)
		type = "<NULL>";
	idx = tomoyo_read_lock();
	error = tomoyo_mount_acl(&r, dev_name, path, type, flags);
	tomoyo_read_unlock(idx);
	return error;
}

/**
 * tomoyo_same_mount_acl - Check whether two mount ACL entries are identical.
 *
 * @a: Pointer to "struct tomoyo_acl_info".
 * @b: Pointer to "struct tomoyo_acl_info".
 *
 * Returns true if all four unions (dev, dir, fs type, flags) match.
 */
static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a,
				  const struct tomoyo_acl_info *b)
{
	const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head);
	const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head);
	return tomoyo_same_acl_head(&p1->head, &p2->head) &&
		tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) &&
		tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) &&
		tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) &&
		tomoyo_same_number_union(&p1->flags, &p2->flags);
}

/**
 * tomoyo_write_mount - Write "struct tomoyo_mount_acl" list.
 *
 * @data:      String to parse.
 * @domain:    Pointer to "struct tomoyo_domain_info".
 * @is_delete: True if it is a delete request.
 *
 * Returns 0 on success, negative value otherwise.
 *
 * Caller holds tomoyo_read_lock().
 */
int tomoyo_write_mount(char *data, struct tomoyo_domain_info *domain,
		       const bool is_delete)
{
	struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
	int error = is_delete ? -ENOENT : -ENOMEM;
	char *w[4];

	/* Expect exactly: dev_name dir_name fs_type flags */
	if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[3][0])
		return -EINVAL;
	if (!tomoyo_parse_name_union(w[0], &e.dev_name) ||
	    !tomoyo_parse_name_union(w[1], &e.dir_name) ||
	    !tomoyo_parse_name_union(w[2], &e.fs_type) ||
	    !tomoyo_parse_number_union(w[3], &e.flags))
		goto out;
	error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain,
				     tomoyo_same_mount_acl, NULL);
 out:
	tomoyo_put_name_union(&e.dev_name);
	tomoyo_put_name_union(&e.dir_name);
	tomoyo_put_name_union(&e.fs_type);
	tomoyo_put_number_union(&e.flags);
	return error;
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_3477_0
crossvul-cpp_data_good_4829_4
/* too_few_image_data.gd2 claims to have a size of 12336x48 pixels, but
   doesn't provide as much image data.  We test that
   gdImageCreateFromGd2Ctx() returns NULL in this case. */

#include "gd.h"
#include "gdtest.h"

int main()
{
	gdImagePtr im;
	FILE *fp;

	fp = gdTestFileOpen2("gd2", "too_few_image_data.gd2");
	gdTestAssert(fp != NULL);
	/* Bail out on a failed open instead of passing NULL to
	 * gdImageCreateFromGd2() and fclose(), which would crash the
	 * test runner rather than report a clean failure. */
	if (fp == NULL)
		return gdNumFailures();

	im = gdImageCreateFromGd2(fp);
	gdTestAssert(im == NULL);
	/* If decoding unexpectedly succeeded, don't leak the image. */
	if (im != NULL)
		gdImageDestroy(im);
	fclose(fp);

	return gdNumFailures();
}
./CrossVul/dataset_final_sorted/CWE-20/c/good_4829_4
crossvul-cpp_data_good_397_0
#include "cache.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
#include "tree-walk.h"
#include "commit.h"
#include "tag.h"
#include "fsck.h"
#include "refs.h"
#include "utf8.h"
#include "sha1-array.h"
#include "decorate.h"
#include "oidset.h"
#include "packfile.h"
#include "submodule-config.h"
#include "config.h"

/* Blobs referenced as .gitmodules by some tree, and those already checked. */
static struct oidset gitmodules_found = OIDSET_INIT;
static struct oidset gitmodules_done = OIDSET_INIT;

#define FSCK_FATAL -1
#define FSCK_INFO -2

#define FOREACH_MSG_ID(FUNC) \
	/* fatal errors */ \
	FUNC(NUL_IN_HEADER, FATAL) \
	FUNC(UNTERMINATED_HEADER, FATAL) \
	/* errors */ \
	FUNC(BAD_DATE, ERROR) \
	FUNC(BAD_DATE_OVERFLOW, ERROR) \
	FUNC(BAD_EMAIL, ERROR) \
	FUNC(BAD_NAME, ERROR) \
	FUNC(BAD_OBJECT_SHA1, ERROR) \
	FUNC(BAD_PARENT_SHA1, ERROR) \
	FUNC(BAD_TAG_OBJECT, ERROR) \
	FUNC(BAD_TIMEZONE, ERROR) \
	FUNC(BAD_TREE, ERROR) \
	FUNC(BAD_TREE_SHA1, ERROR) \
	FUNC(BAD_TYPE, ERROR) \
	FUNC(DUPLICATE_ENTRIES, ERROR) \
	FUNC(MISSING_AUTHOR, ERROR) \
	FUNC(MISSING_COMMITTER, ERROR) \
	FUNC(MISSING_EMAIL, ERROR) \
	FUNC(MISSING_GRAFT, ERROR) \
	FUNC(MISSING_NAME_BEFORE_EMAIL, ERROR) \
	FUNC(MISSING_OBJECT, ERROR) \
	FUNC(MISSING_PARENT, ERROR) \
	FUNC(MISSING_SPACE_BEFORE_DATE, ERROR) \
	FUNC(MISSING_SPACE_BEFORE_EMAIL, ERROR) \
	FUNC(MISSING_TAG, ERROR) \
	FUNC(MISSING_TAG_ENTRY, ERROR) \
	FUNC(MISSING_TAG_OBJECT, ERROR) \
	FUNC(MISSING_TREE, ERROR) \
	FUNC(MISSING_TREE_OBJECT, ERROR) \
	FUNC(MISSING_TYPE, ERROR) \
	FUNC(MISSING_TYPE_ENTRY, ERROR) \
	FUNC(MULTIPLE_AUTHORS, ERROR) \
	FUNC(TAG_OBJECT_NOT_TAG, ERROR) \
	FUNC(TREE_NOT_SORTED, ERROR) \
	FUNC(UNKNOWN_TYPE, ERROR) \
	FUNC(ZERO_PADDED_DATE, ERROR) \
	FUNC(GITMODULES_MISSING, ERROR) \
	FUNC(GITMODULES_BLOB, ERROR) \
	FUNC(GITMODULES_PARSE, ERROR) \
	FUNC(GITMODULES_NAME, ERROR) \
	FUNC(GITMODULES_SYMLINK, ERROR) \
	FUNC(GITMODULES_URL, ERROR) \
	/* warnings */ \
	FUNC(BAD_FILEMODE, WARN) \
	FUNC(EMPTY_NAME, WARN) \
	FUNC(FULL_PATHNAME, WARN) \
	FUNC(HAS_DOT, WARN) \
	FUNC(HAS_DOTDOT, WARN) \
	FUNC(HAS_DOTGIT, WARN) \
	FUNC(NULL_SHA1, WARN) \
	FUNC(ZERO_PADDED_FILEMODE, WARN) \
	FUNC(NUL_IN_COMMIT, WARN) \
	/* infos (reported as warnings, but ignored by default) */ \
	FUNC(BAD_TAG_NAME, INFO) \
	FUNC(MISSING_TAGGER_ENTRY, INFO)

#define MSG_ID(id, msg_type) FSCK_MSG_##id,
enum fsck_msg_id {
	FOREACH_MSG_ID(MSG_ID)
	FSCK_MSG_MAX
};
#undef MSG_ID

#define STR(x) #x
#define MSG_ID(id, msg_type) { STR(id), NULL, FSCK_##msg_type },
static struct {
	const char *id_string;
	const char *downcased;
	int msg_type;
} msg_id_info[FSCK_MSG_MAX + 1] = {
	FOREACH_MSG_ID(MSG_ID)
	{ NULL, NULL, -1 }
};
#undef MSG_ID

/*
 * Map a user-supplied message name ("nulinheader" style: lowercase, no
 * underscores) to its FSCK_MSG_* id, or -1 if unknown.  The downcased
 * table is built lazily on first call.
 */
static int parse_msg_id(const char *text)
{
	int i;

	if (!msg_id_info[0].downcased) {
		/* convert id_string to lower case, without underscores. */
		for (i = 0; i < FSCK_MSG_MAX; i++) {
			const char *p = msg_id_info[i].id_string;
			int len = strlen(p);
			char *q = xmalloc(len);

			msg_id_info[i].downcased = q;
			while (*p)
				if (*p == '_')
					p++;
				else
					*(q)++ = tolower(*(p)++);
			*q = '\0';
		}
	}

	for (i = 0; i < FSCK_MSG_MAX; i++)
		if (!strcmp(text, msg_id_info[i].downcased))
			return i;

	return -1;
}

/*
 * Effective severity for a message id: per-options override if set,
 * otherwise the table default (warnings promoted to errors in strict mode).
 */
static int fsck_msg_type(enum fsck_msg_id msg_id,
	struct fsck_options *options)
{
	int msg_type;

	assert(msg_id >= 0 && msg_id < FSCK_MSG_MAX);

	if (options->msg_type)
		msg_type = options->msg_type[msg_id];
	else {
		msg_type = msg_id_info[msg_id].msg_type;
		if (options->strict && msg_type == FSCK_WARN)
			msg_type = FSCK_ERROR;
	}

	return msg_type;
}

/*
 * Load a file of object ids (one hex id per line) into options->skiplist;
 * objects listed there are exempt from reporting.  Dies on I/O or parse
 * errors.  Marks the array sorted when the input was already in order.
 */
static void init_skiplist(struct fsck_options *options, const char *path)
{
	static struct oid_array skiplist = OID_ARRAY_INIT;
	int sorted, fd;
	char buffer[GIT_MAX_HEXSZ + 1];
	struct object_id oid;

	if (options->skiplist)
		sorted = options->skiplist->sorted;
	else {
		sorted = 1;
		options->skiplist = &skiplist;
	}

	fd = open(path, O_RDONLY);
	if (fd < 0)
		die("Could not open skip list: %s", path);
	for (;;) {
		const char *p;
		int result = read_in_full(fd, buffer, sizeof(buffer));
		if (result < 0)
			die_errno("Could not read '%s'", path);
		if (!result)
			break;
		if (parse_oid_hex(buffer, &oid, &p) || *p != '\n')
			die("Invalid SHA-1: %s", buffer);
		oid_array_append(&skiplist, &oid);
		if (sorted && skiplist.nr > 1 &&
				oidcmp(&skiplist.oid[skiplist.nr - 2],
				       &oid) > 0)
			sorted = 0;
	}
	close(fd);

	if (sorted)
		skiplist.sorted = 1;
}

/* Parse "error"/"warn"/"ignore" into an FSCK_* constant; dies otherwise. */
static int parse_msg_type(const char *str)
{
	if (!strcmp(str, "error"))
		return FSCK_ERROR;
	else if (!strcmp(str, "warn"))
		return FSCK_WARN;
	else if (!strcmp(str, "ignore"))
		return FSCK_IGNORE;
	else
		die("Unknown fsck message type: '%s'", str);
}

/*
 * Validate a (msg_id, msg_type) pair from configuration.  Returns 0 for an
 * unknown id; dies (via parse_msg_type) on an unknown type.
 */
int is_valid_msg_type(const char *msg_id, const char *msg_type)
{
	if (parse_msg_id(msg_id) < 0)
		return 0;
	parse_msg_type(msg_type);
	return 1;
}

/*
 * Override the severity of one message id.  FATAL messages may not be
 * demoted below error.  Allocates the per-options override table on first
 * use, seeded from the current defaults.
 */
void fsck_set_msg_type(struct fsck_options *options,
		const char *msg_id, const char *msg_type)
{
	int id = parse_msg_id(msg_id), type;

	if (id < 0)
		die("Unhandled message id: %s", msg_id);
	type = parse_msg_type(msg_type);

	if (type != FSCK_ERROR && msg_id_info[id].msg_type == FSCK_FATAL)
		die("Cannot demote %s to %s", msg_id, msg_type);

	if (!options->msg_type) {
		int i;
		int *msg_type;
		ALLOC_ARRAY(msg_type, FSCK_MSG_MAX);
		for (i = 0; i < FSCK_MSG_MAX; i++)
			msg_type[i] = fsck_msg_type(i, options);
		options->msg_type = msg_type;
	}

	options->msg_type[id] = type;
}

/*
 * Parse a list of "id=type" settings separated by space/','/'|'.
 * "skiplist=<path>" loads a skip list instead of setting a severity.
 */
void fsck_set_msg_types(struct fsck_options *options, const char *values)
{
	char *buf = xstrdup(values), *to_free = buf;
	int done = 0;

	while (!done) {
		int len = strcspn(buf, " ,|"), equal;

		done = !buf[len];
		if (!len) {
			buf++;
			continue;
		}
		buf[len] = '\0';

		for (equal = 0;
		     equal < len && buf[equal] != '=' && buf[equal] != ':';
		     equal++)
			buf[equal] = tolower(buf[equal]);
		buf[equal] = '\0';

		if (!strcmp(buf, "skiplist")) {
			if (equal == len)
				die("skiplist requires a path");
			init_skiplist(options, buf + equal + 1);
			buf += len + 1;
			continue;
		}

		if (equal == len)
			die("Missing '=': '%s'", buf);

		fsck_set_msg_type(options, buf, buf + equal + 1);
		buf += len + 1;
	}
	free(to_free);
}

/* Append "camelCased: " form of an UPPER_SNAKE message id to sb. */
static void append_msg_id(struct strbuf *sb, const char *msg_id)
{
	for (;;) {
		char c = *(msg_id)++;

		if (!c)
			break;
		if (c != '_')
			strbuf_addch(sb, tolower(c));
		else {
			assert(*msg_id);
			strbuf_addch(sb, *(msg_id)++);
		}
	}

	strbuf_addstr(sb, ": ");
}

/*
 * Report one finding through options->error_func, honoring ignore settings
 * and the skip list.  Returns the callback's result (0 when suppressed).
 */
__attribute__((format (printf, 4, 5)))
static int report(struct fsck_options *options, struct object *object,
	enum fsck_msg_id id, const char *fmt, ...)
{
	va_list ap;
	struct strbuf sb = STRBUF_INIT;
	int msg_type = fsck_msg_type(id, options), result;

	if (msg_type == FSCK_IGNORE)
		return 0;

	if (options->skiplist && object &&
			oid_array_lookup(options->skiplist, &object->oid) >= 0)
		return 0;

	if (msg_type == FSCK_FATAL)
		msg_type = FSCK_ERROR;
	else if (msg_type == FSCK_INFO)
		msg_type = FSCK_WARN;

	append_msg_id(&sb, msg_id_info[id].id_string);

	va_start(ap, fmt);
	strbuf_vaddf(&sb, fmt, ap);
	result = options->error_func(options, object, msg_type, sb.buf);
	strbuf_release(&sb);
	va_end(ap);

	return result;
}

/* Human-readable name recorded for obj during the walk, or NULL. */
static char *get_object_name(struct fsck_options *options, struct object *obj)
{
	if (!options->object_names)
		return NULL;
	return lookup_decoration(options->object_names, obj);
}

/* Record a printf-formatted name for obj (first name wins; later ones are
 * ignored).  No-op when name tracking is disabled. */
static void put_object_name(struct fsck_options *options, struct object *obj,
	const char *fmt, ...)
{
	va_list ap;
	struct strbuf buf = STRBUF_INIT;
	char *existing;

	if (!options->object_names)
		return;
	existing = lookup_decoration(options->object_names, obj);
	if (existing)
		return;
	va_start(ap, fmt);
	strbuf_vaddf(&buf, fmt, ap);
	add_decoration(options->object_names, obj, strbuf_detach(&buf, NULL));
	va_end(ap);
}

/*
 * "<oid> (<name>)" for error messages.  Returns a pointer into a static
 * buffer, so the result is only valid until the next call.
 */
static const char *describe_object(struct fsck_options *o, struct object *obj)
{
	static struct strbuf buf = STRBUF_INIT;
	char *name;

	strbuf_reset(&buf);
	strbuf_addstr(&buf, oid_to_hex(&obj->oid));
	if (o->object_names && (name = lookup_decoration(o->object_names, obj)))
		strbuf_addf(&buf, " (%s)", name);

	return buf.buf;
}

/*
 * Walk direct children of a tree (subtrees and blobs; gitlinks are
 * skipped), naming each child and invoking options->walk on it.  Returns
 * the first nonzero callback result, or a negative value on parse errors.
 */
static int fsck_walk_tree(struct tree *tree, void *data, struct fsck_options *options)
{
	struct tree_desc desc;
	struct name_entry entry;
	int res = 0;
	const char *name;

	if (parse_tree(tree))
		return -1;

	name = get_object_name(options, &tree->object);
	if (init_tree_desc_gently(&desc, tree->buffer, tree->size))
		return -1;
	while (tree_entry_gently(&desc, &entry)) {
		struct object *obj;
		int result;

		if (S_ISGITLINK(entry.mode))
			continue;

		if (S_ISDIR(entry.mode)) {
			obj = (struct object *)lookup_tree(entry.oid);
			if (name && obj)
				put_object_name(options, obj, "%s%s/", name,
					entry.path);
			result = options->walk(obj, OBJ_TREE, data, options);
		}
		else if (S_ISREG(entry.mode) || S_ISLNK(entry.mode)) {
			obj = (struct object *)lookup_blob(entry.oid);
			if (name && obj)
				put_object_name(options, obj, "%s%s", name,
					entry.path);
			result = options->walk(obj, OBJ_BLOB, data, options);
		}
		else {
			result = error("in tree %s: entry %s has bad mode %.6o",
					describe_object(options, &tree->object), entry.path, entry.mode);
		}
		if (result < 0)
			return result;
		if (!res)
			res = result;
	}
	return res;
}

/*
 * Walk a commit's tree and parents.  The parent-naming logic derives
 * "name^", "name^N" and "name~N" labels from the commit's own name.
 */
static int fsck_walk_commit(struct commit *commit, void *data, struct fsck_options *options)
{
	int counter = 0, generation = 0, name_prefix_len = 0;
	struct commit_list *parents;
	int res;
	int result;
	const char *name;

	if (parse_commit(commit))
		return -1;

	name = get_object_name(options, &commit->object);
	if (name)
		put_object_name(options, &commit->tree->object, "%s:", name);

	result = options->walk((struct object *)commit->tree, OBJ_TREE, data, options);
	if (result < 0)
		return result;
	res = result;

	parents = commit->parents;
	if (name && parents) {
		int len = strlen(name), power;

		if (len && name[len - 1] == '^') {
			generation = 1;
			name_prefix_len = len - 1;
		}
		else { /* parse ~<generation> suffix */
			for (generation = 0, power = 1;
			     len && isdigit(name[len - 1]);
			     power *= 10)
				generation += power * (name[--len] - '0');
			if (power > 1 && len && name[len - 1] == '~')
				name_prefix_len = len - 1;
		}
	}

	while (parents) {
		if (name) {
			struct object *obj = &parents->item->object;

			if (++counter > 1)
				put_object_name(options, obj, "%s^%d",
					name, counter);
			else if (generation > 0)
				put_object_name(options, obj, "%.*s~%d",
					name_prefix_len, name, generation + 1);
			else
				put_object_name(options, obj, "%s^", name);
		}
		result = options->walk((struct object *)parents->item, OBJ_COMMIT, data, options);
		if (result < 0)
			return result;
		if (!res)
			res = result;
		parents = parents->next;
	}
	return res;
}

/* Walk the object a tag points at, propagating the tag's name. */
static int fsck_walk_tag(struct tag *tag, void *data, struct fsck_options *options)
{
	char *name = get_object_name(options, &tag->object);

	if (parse_tag(tag))
		return -1;
	if (name)
		put_object_name(options, tag->tagged, "%s", name);
	return options->walk(tag->tagged, OBJ_ANY, data, options);
}

/* Dispatch a walk over one object according to its type. */
int fsck_walk(struct object *obj, void *data, struct fsck_options *options)
{
	if (!obj)
		return -1;

	if (obj->type == OBJ_NONE)
		parse_object(&obj->oid);

	switch (obj->type) {
	case OBJ_BLOB:
		return 0;
	case OBJ_TREE:
		return fsck_walk_tree((struct tree *)obj, data, options);
	case OBJ_COMMIT:
		return fsck_walk_commit((struct commit *)obj, data, options);
	case OBJ_TAG:
		return fsck_walk_tag((struct tag *)obj, data, options);
	default:
		error("Unknown object type for %s",
		      describe_object(options, obj));
		return -1;
	}
}

/*
 * The entries in a tree are ordered in the _path_ order,
 * which means that a directory entry is ordered by adding
 * a slash to the end of it.
 *
 * So a directory called "a" is ordered _after_ a file
 * called "a.c", because "a/" sorts after "a.c".
 */
#define TREE_UNORDERED (-1)
#define TREE_HAS_DUPS  (-2)

/*
 * Compare two consecutive tree entries in path order.  Returns 0 when in
 * order, TREE_UNORDERED when out of order, TREE_HAS_DUPS on equal names.
 */
static int verify_ordered(unsigned mode1, const char *name1, unsigned mode2, const char *name2)
{
	int len1 = strlen(name1);
	int len2 = strlen(name2);
	int len = len1 < len2 ? len1 : len2;
	unsigned char c1, c2;
	int cmp;

	cmp = memcmp(name1, name2, len);
	if (cmp < 0)
		return 0;
	if (cmp > 0)
		return TREE_UNORDERED;

	/*
	 * Ok, the first <len> characters are the same.
	 * Now we need to order the next one, but turn
	 * a '\0' into a '/' for a directory entry.
	 */
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && !c2)
		/*
		 * git-write-tree used to write out a nonsense tree that has
		 * entries with the same name, one blob and one tree.  Make
		 * sure we do not have duplicate entries.
		 */
		return TREE_HAS_DUPS;
	if (!c1 && S_ISDIR(mode1))
		c1 = '/';
	if (!c2 && S_ISDIR(mode2))
		c2 = '/';
	return c1 < c2 ? 0 : TREE_UNORDERED;
}

/*
 * Validate one tree object: entry names (null sha1, '.'/'..'/'.git',
 * full paths), file modes, sort order, duplicates.  Also records blobs
 * referenced as ".gitmodules" (in gitmodules_found) so they can be
 * content-checked later, and flags symlinked .gitmodules entries.
 * Returns the accumulated report() results.
 */
static int fsck_tree(struct tree *item, struct fsck_options *options)
{
	int retval = 0;
	int has_null_sha1 = 0;
	int has_full_path = 0;
	int has_empty_name = 0;
	int has_dot = 0;
	int has_dotdot = 0;
	int has_dotgit = 0;
	int has_zero_pad = 0;
	int has_bad_modes = 0;
	int has_dup_entries = 0;
	int not_properly_sorted = 0;
	struct tree_desc desc;
	unsigned o_mode;
	const char *o_name;

	if (init_tree_desc_gently(&desc, item->buffer, item->size)) {
		retval += report(options, &item->object, FSCK_MSG_BAD_TREE, "cannot be parsed as a tree");
		return retval;
	}

	o_mode = 0;
	o_name = NULL;

	while (desc.size) {
		unsigned mode;
		const char *name;
		const struct object_id *oid;

		oid = tree_entry_extract(&desc, &name, &mode);

		has_null_sha1 |= is_null_oid(oid);
		has_full_path |= !!strchr(name, '/');
		has_empty_name |= !*name;
		has_dot |= !strcmp(name, ".");
		has_dotdot |= !strcmp(name, "..");
		has_dotgit |= is_hfs_dotgit(name) || is_ntfs_dotgit(name);
		has_zero_pad |= *(char *)desc.buffer == '0';

		if (is_hfs_dotgitmodules(name) || is_ntfs_dotgitmodules(name)) {
			if (!S_ISLNK(mode))
				oidset_insert(&gitmodules_found, oid);
			else
				retval += report(options, &item->object,
						 FSCK_MSG_GITMODULES_SYMLINK,
						 ".gitmodules is a symbolic link");
		}

		if (update_tree_entry_gently(&desc)) {
			retval += report(options, &item->object, FSCK_MSG_BAD_TREE, "cannot be parsed as a tree");
			break;
		}

		switch (mode) {
		/*
		 * Standard modes..
		 */
		case S_IFREG | 0755:
		case S_IFREG | 0644:
		case S_IFLNK:
		case S_IFDIR:
		case S_IFGITLINK:
			break;
		/*
		 * This is nonstandard, but we had a few of these
		 * early on when we honored the full set of mode
		 * bits..
		 */
		case S_IFREG | 0664:
			if (!options->strict)
				break;
			/* fallthrough */
		default:
			has_bad_modes = 1;
		}

		if (o_name) {
			switch (verify_ordered(o_mode, o_name, mode, name)) {
			case TREE_UNORDERED:
				not_properly_sorted = 1;
				break;
			case TREE_HAS_DUPS:
				has_dup_entries = 1;
				break;
			default:
				break;
			}
		}

		o_mode = mode;
		o_name = name;
	}

	if (has_null_sha1)
		retval += report(options, &item->object, FSCK_MSG_NULL_SHA1, "contains entries pointing to null sha1");
	if (has_full_path)
		retval += report(options, &item->object, FSCK_MSG_FULL_PATHNAME, "contains full pathnames");
	if (has_empty_name)
		retval += report(options, &item->object, FSCK_MSG_EMPTY_NAME, "contains empty pathname");
	if (has_dot)
		retval += report(options, &item->object, FSCK_MSG_HAS_DOT, "contains '.'");
	if (has_dotdot)
		retval += report(options, &item->object, FSCK_MSG_HAS_DOTDOT, "contains '..'");
	if (has_dotgit)
		retval += report(options, &item->object, FSCK_MSG_HAS_DOTGIT, "contains '.git'");
	if (has_zero_pad)
		retval += report(options, &item->object, FSCK_MSG_ZERO_PADDED_FILEMODE, "contains zero-padded file modes");
	if (has_bad_modes)
		retval += report(options, &item->object, FSCK_MSG_BAD_FILEMODE, "contains bad file modes");
	if (has_dup_entries)
		retval += report(options, &item->object, FSCK_MSG_DUPLICATE_ENTRIES, "contains duplicate file entries");
	if (not_properly_sorted)
		retval += report(options, &item->object, FSCK_MSG_TREE_NOT_SORTED, "not properly sorted");
	return retval;
}

/*
 * Check that an object's header section contains no NUL bytes and is
 * terminated by a blank line (or at least a trailing LF when there is no
 * body).  Note: offset is printed with %ld although i is unsigned long.
 */
static int verify_headers(const void *data, unsigned long size,
			  struct object *obj, struct fsck_options *options)
{
	const char *buffer = (const char *)data;
	unsigned long i;

	for (i = 0; i < size; i++) {
		switch (buffer[i]) {
		case '\0':
			return report(options, obj,
				FSCK_MSG_NUL_IN_HEADER,
				"unterminated header: NUL at offset %ld", i);
		case '\n':
			if (i + 1 < size && buffer[i + 1] == '\n')
				return 0;
		}
	}

	/*
	 * We did not find double-LF that separates the header
	 * and the body.  Not having a body is not a crime but
	 * we do want to see the terminating LF for the last header
	 * line.
	 */
	if (size && buffer[size - 1] == '\n')
		return 0;

	return report(options, obj,
		FSCK_MSG_UNTERMINATED_HEADER, "unterminated header");
}

/*
 * Validate one "name <email> timestamp timezone\n" ident line, advancing
 * *ident past it.  Returns 0 or the first report() result.
 */
static int fsck_ident(const char **ident, struct object *obj, struct fsck_options *options)
{
	const char *p = *ident;
	char *end;

	*ident = strchrnul(*ident, '\n');
	if (**ident == '\n')
		(*ident)++;

	if (*p == '<')
		return report(options, obj, FSCK_MSG_MISSING_NAME_BEFORE_EMAIL, "invalid author/committer line - missing space before email");
	p += strcspn(p, "<>\n");
	if (*p == '>')
		return report(options, obj, FSCK_MSG_BAD_NAME, "invalid author/committer line - bad name");
	if (*p != '<')
		return report(options, obj, FSCK_MSG_MISSING_EMAIL, "invalid author/committer line - missing email");
	if (p[-1] != ' ')
		return report(options, obj, FSCK_MSG_MISSING_SPACE_BEFORE_EMAIL, "invalid author/committer line - missing space before email");
	p++;
	p += strcspn(p, "<>\n");
	if (*p != '>')
		return report(options, obj, FSCK_MSG_BAD_EMAIL, "invalid author/committer line - bad email");
	p++;
	if (*p != ' ')
		return report(options, obj, FSCK_MSG_MISSING_SPACE_BEFORE_DATE, "invalid author/committer line - missing space before date");
	p++;
	if (*p == '0' && p[1] != ' ')
		return report(options, obj, FSCK_MSG_ZERO_PADDED_DATE, "invalid author/committer line - zero-padded date");
	if (date_overflows(parse_timestamp(p, &end, 10)))
		return report(options, obj, FSCK_MSG_BAD_DATE_OVERFLOW, "invalid author/committer line - date causes integer overflow");
	if ((end == p || *end != ' '))
		return report(options, obj, FSCK_MSG_BAD_DATE, "invalid author/committer line - bad date");
	p = end + 1;
	if ((*p != '+' && *p != '-') ||
	    !isdigit(p[1]) ||
	    !isdigit(p[2]) ||
	    !isdigit(p[3]) ||
	    !isdigit(p[4]) ||
	    (p[5] != '\n'))
		return report(options, obj, FSCK_MSG_BAD_TIMEZONE, "invalid author/committer line - bad time zone");
	p += 6;
	return 0;
}

/*
 * Validate a commit object's buffer: tree line, parent lines (cross-checked
 * against parsed parents/grafts), exactly one author, a committer, and no
 * NUL bytes in the body.
 */
static int fsck_commit_buffer(struct commit *commit, const char *buffer,
	unsigned long size, struct fsck_options *options)
{
	unsigned char tree_sha1[20], sha1[20];
	struct commit_graft *graft;
	unsigned parent_count, parent_line_count = 0, author_count;
	int err;
	const char *buffer_begin = buffer;

	if (verify_headers(buffer, size, &commit->object, options))
		return -1;

	if (!skip_prefix(buffer, "tree ", &buffer))
		return report(options, &commit->object, FSCK_MSG_MISSING_TREE, "invalid format - expected 'tree' line");
	if (get_sha1_hex(buffer, tree_sha1) || buffer[40] != '\n') {
		err = report(options, &commit->object, FSCK_MSG_BAD_TREE_SHA1, "invalid 'tree' line format - bad sha1");
		if (err)
			return err;
	}
	buffer += 41;
	while (skip_prefix(buffer, "parent ", &buffer)) {
		if (get_sha1_hex(buffer, sha1) || buffer[40] != '\n') {
			err = report(options, &commit->object, FSCK_MSG_BAD_PARENT_SHA1, "invalid 'parent' line format - bad sha1");
			if (err)
				return err;
		}
		buffer += 41;
		parent_line_count++;
	}
	graft = lookup_commit_graft(&commit->object.oid);
	parent_count = commit_list_count(commit->parents);
	if (graft) {
		if (graft->nr_parent == -1 && !parent_count)
			; /* shallow commit */
		else if (graft->nr_parent != parent_count) {
			err = report(options, &commit->object, FSCK_MSG_MISSING_GRAFT, "graft objects missing");
			if (err)
				return err;
		}
	} else {
		if (parent_count != parent_line_count) {
			err = report(options, &commit->object, FSCK_MSG_MISSING_PARENT, "parent objects missing");
			if (err)
				return err;
		}
	}
	author_count = 0;
	while (skip_prefix(buffer, "author ", &buffer)) {
		author_count++;
		err = fsck_ident(&buffer, &commit->object, options);
		if (err)
			return err;
	}
	if (author_count < 1)
		err = report(options, &commit->object, FSCK_MSG_MISSING_AUTHOR, "invalid format - expected 'author' line");
	else if (author_count > 1)
		err = report(options, &commit->object, FSCK_MSG_MULTIPLE_AUTHORS, "invalid format - multiple 'author' lines");
	if (err)
		return err;
	if (!skip_prefix(buffer, "committer ", &buffer))
		return report(options, &commit->object, FSCK_MSG_MISSING_COMMITTER, "invalid format - expected 'committer' line");
	err = fsck_ident(&buffer, &commit->object, options);
	if (err)
		return err;
	if (!commit->tree) {
		err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", sha1_to_hex(tree_sha1));
		if (err)
			return err;
	}
	if (memchr(buffer_begin, '\0', size)) {
		err = report(options, &commit->object, FSCK_MSG_NUL_IN_COMMIT,
			     "NUL byte in the commit object body");
		if (err)
			return err;
	}
	return 0;
}

static int fsck_commit(struct commit *commit, const char *data,
	unsigned long size, struct fsck_options *options)
{
	const char *buffer = data ?
data : get_commit_buffer(commit, &size); int ret = fsck_commit_buffer(commit, buffer, size, options); if (!data) unuse_commit_buffer(commit, buffer); return ret; } static int fsck_tag_buffer(struct tag *tag, const char *data, unsigned long size, struct fsck_options *options) { unsigned char sha1[20]; int ret = 0; const char *buffer; char *to_free = NULL, *eol; struct strbuf sb = STRBUF_INIT; if (data) buffer = data; else { enum object_type type; buffer = to_free = read_sha1_file(tag->object.oid.hash, &type, &size); if (!buffer) return report(options, &tag->object, FSCK_MSG_MISSING_TAG_OBJECT, "cannot read tag object"); if (type != OBJ_TAG) { ret = report(options, &tag->object, FSCK_MSG_TAG_OBJECT_NOT_TAG, "expected tag got %s", type_name(type)); goto done; } } ret = verify_headers(buffer, size, &tag->object, options); if (ret) goto done; if (!skip_prefix(buffer, "object ", &buffer)) { ret = report(options, &tag->object, FSCK_MSG_MISSING_OBJECT, "invalid format - expected 'object' line"); goto done; } if (get_sha1_hex(buffer, sha1) || buffer[40] != '\n') { ret = report(options, &tag->object, FSCK_MSG_BAD_OBJECT_SHA1, "invalid 'object' line format - bad sha1"); if (ret) goto done; } buffer += 41; if (!skip_prefix(buffer, "type ", &buffer)) { ret = report(options, &tag->object, FSCK_MSG_MISSING_TYPE_ENTRY, "invalid format - expected 'type' line"); goto done; } eol = strchr(buffer, '\n'); if (!eol) { ret = report(options, &tag->object, FSCK_MSG_MISSING_TYPE, "invalid format - unexpected end after 'type' line"); goto done; } if (type_from_string_gently(buffer, eol - buffer, 1) < 0) ret = report(options, &tag->object, FSCK_MSG_BAD_TYPE, "invalid 'type' value"); if (ret) goto done; buffer = eol + 1; if (!skip_prefix(buffer, "tag ", &buffer)) { ret = report(options, &tag->object, FSCK_MSG_MISSING_TAG_ENTRY, "invalid format - expected 'tag' line"); goto done; } eol = strchr(buffer, '\n'); if (!eol) { ret = report(options, &tag->object, FSCK_MSG_MISSING_TAG, "invalid format 
- unexpected end after 'type' line"); goto done; } strbuf_addf(&sb, "refs/tags/%.*s", (int)(eol - buffer), buffer); if (check_refname_format(sb.buf, 0)) { ret = report(options, &tag->object, FSCK_MSG_BAD_TAG_NAME, "invalid 'tag' name: %.*s", (int)(eol - buffer), buffer); if (ret) goto done; } buffer = eol + 1; if (!skip_prefix(buffer, "tagger ", &buffer)) { /* early tags do not contain 'tagger' lines; warn only */ ret = report(options, &tag->object, FSCK_MSG_MISSING_TAGGER_ENTRY, "invalid format - expected 'tagger' line"); if (ret) goto done; } else ret = fsck_ident(&buffer, &tag->object, options); done: strbuf_release(&sb); free(to_free); return ret; } static int fsck_tag(struct tag *tag, const char *data, unsigned long size, struct fsck_options *options) { struct object *tagged = tag->tagged; if (!tagged) return report(options, &tag->object, FSCK_MSG_BAD_TAG_OBJECT, "could not load tagged object"); return fsck_tag_buffer(tag, data, size, options); } struct fsck_gitmodules_data { struct object *obj; struct fsck_options *options; int ret; }; static int fsck_gitmodules_fn(const char *var, const char *value, void *vdata) { struct fsck_gitmodules_data *data = vdata; const char *subsection, *key; int subsection_len; char *name; if (parse_config_key(var, "submodule", &subsection, &subsection_len, &key) < 0 || !subsection) return 0; name = xmemdupz(subsection, subsection_len); if (check_submodule_name(name) < 0) data->ret |= report(data->options, data->obj, FSCK_MSG_GITMODULES_NAME, "disallowed submodule name: %s", name); if (!strcmp(key, "url") && value && looks_like_command_line_option(value)) data->ret |= report(data->options, data->obj, FSCK_MSG_GITMODULES_URL, "disallowed submodule url: %s", value); free(name); return 0; } static int fsck_blob(struct blob *blob, const char *buf, unsigned long size, struct fsck_options *options) { struct fsck_gitmodules_data data; if (!oidset_contains(&gitmodules_found, &blob->object.oid)) return 0; oidset_insert(&gitmodules_done, 
&blob->object.oid); if (!buf) { /* * A missing buffer here is a sign that the caller found the * blob too gigantic to load into memory. Let's just consider * that an error. */ return report(options, &blob->object, FSCK_MSG_GITMODULES_PARSE, ".gitmodules too large to parse"); } data.obj = &blob->object; data.options = options; data.ret = 0; if (git_config_from_mem(fsck_gitmodules_fn, CONFIG_ORIGIN_BLOB, ".gitmodules", buf, size, &data)) data.ret |= report(options, &blob->object, FSCK_MSG_GITMODULES_PARSE, "could not parse gitmodules blob"); return data.ret; } int fsck_object(struct object *obj, void *data, unsigned long size, struct fsck_options *options) { if (!obj) return report(options, obj, FSCK_MSG_BAD_OBJECT_SHA1, "no valid object to fsck"); if (obj->type == OBJ_BLOB) return fsck_blob((struct blob *)obj, data, size, options); if (obj->type == OBJ_TREE) return fsck_tree((struct tree *) obj, options); if (obj->type == OBJ_COMMIT) return fsck_commit((struct commit *) obj, (const char *) data, size, options); if (obj->type == OBJ_TAG) return fsck_tag((struct tag *) obj, (const char *) data, size, options); return report(options, obj, FSCK_MSG_UNKNOWN_TYPE, "unknown type '%d' (internal fsck error)", obj->type); } int fsck_error_function(struct fsck_options *o, struct object *obj, int msg_type, const char *message) { if (msg_type == FSCK_WARN) { warning("object %s: %s", describe_object(o, obj), message); return 0; } error("object %s: %s", describe_object(o, obj), message); return 1; } int fsck_finish(struct fsck_options *options) { int ret = 0; struct oidset_iter iter; const struct object_id *oid; oidset_iter_init(&gitmodules_found, &iter); while ((oid = oidset_iter_next(&iter))) { struct blob *blob; enum object_type type; unsigned long size; char *buf; if (oidset_contains(&gitmodules_done, oid)) continue; blob = lookup_blob(oid); if (!blob) { ret |= report(options, &blob->object, FSCK_MSG_GITMODULES_BLOB, "non-blob found at .gitmodules"); continue; } buf = 
read_sha1_file(oid->hash, &type, &size); if (!buf) { if (is_promisor_object(&blob->object.oid)) continue; ret |= report(options, &blob->object, FSCK_MSG_GITMODULES_MISSING, "unable to read .gitmodules blob"); continue; } if (type == OBJ_BLOB) ret |= fsck_blob(blob, buf, size, options); else ret |= report(options, &blob->object, FSCK_MSG_GITMODULES_BLOB, "non-blob found at .gitmodules"); free(buf); } oidset_clear(&gitmodules_found); oidset_clear(&gitmodules_done); return ret; }
./CrossVul/dataset_final_sorted/CWE-20/c/good_397_0
crossvul-cpp_data_good_2578_0
/*
 * coders/jpeg.c - Read/Write JPEG Image Format.
 *
 * Software Design: Cristy, July 1992.
 *
 * Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization
 * dedicated to making software imaging solutions freely available.
 * Licensed under the ImageMagick License; see
 * https://www.imagemagick.org/script/license.php
 *
 * This software is based in part on the work of the Independent JPEG
 * Group.  Blob support contributed by Glenn Randers-Pehrson.
 */

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/colormap-private.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/option-private.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/token.h"
#include "magick/utility.h"
#include "magick/xml-tree.h"
#include <setjmp.h>
#if defined(MAGICKCORE_JPEG_DELEGATE)
#define JPEG_INTERNAL_OPTIONS
#if defined(__MINGW32__) || defined(__MINGW64__)
# define XMD_H 1  /* Avoid conflicting typedef for INT32 */
#endif
#undef HAVE_STDLIB_H
#include "jpeglib.h"
#include "jerror.h"
#endif

/*
  Define declarations.
*/
#define ICC_MARKER  (JPEG_APP0+2)
#define ICC_PROFILE  "ICC_PROFILE"
#define IPTC_MARKER  (JPEG_APP0+13)
#define XML_MARKER  (JPEG_APP0+1)
#define MaxBufferExtent  16384

/*
  Typedef declarations.
*/
#if defined(MAGICKCORE_JPEG_DELEGATE)
/* libjpeg destination manager wrapping an ImageMagick blob for writing. */
typedef struct _DestinationManager
{
  struct jpeg_destination_mgr
    manager;

  Image
    *image;

  JOCTET
    *buffer;
} DestinationManager;

/* Per-decode state for setjmp/longjmp error recovery; profile holds any
   StringInfo in flight so it can be freed on a longjmp. */
typedef struct _ErrorManager
{
  Image
    *image;

  MagickBooleanType
    finished;

  StringInfo
    *profile;

  jmp_buf
    error_recovery;
} ErrorManager;

/* libjpeg source manager wrapping an ImageMagick blob for reading. */
typedef struct _SourceManager
{
  struct jpeg_source_mgr
    manager;

  Image
    *image;

  JOCTET
    *buffer;

  boolean
    start_of_blob;
} SourceManager;
#endif

/* A named custom quantization table parsed from an XML definition. */
typedef struct _QuantizationTable
{
  char
    *slot,
    *description;

  size_t
    width,
    height;

  double
    divisor;

  unsigned int
    *levels;
} QuantizationTable;

/*
  Forward declarations.
*/
#if defined(MAGICKCORE_JPEG_DELEGATE)
static MagickBooleanType
  WriteJPEGImage(const ImageInfo *,Image *);
#endif

/*
 * IsJPEG() returns MagickTrue if the image format type, identified by
 * the magick string (first bytes of the file), is JPEG: the SOI marker
 * FF D8 followed by FF.
 *
 *   o magick: compare image format pattern against these bytes.
 *   o length: the length of the magick string.
 */
static MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length)
{
  if (length < 3)
    return(MagickFalse);
  if (memcmp(magick,"\377\330\377",3) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

#if defined(MAGICKCORE_JPEG_DELEGATE)
/*
 * ReadJPEGImage() reads a JPEG image file and returns it.  It allocates
 * the memory necessary for the new Image structure and returns a pointer
 * to the new image.
 *
 *   o image_info: the image info.
 *   o exception: return any errors or warnings in this structure.
 */

/*
 * libjpeg fill_input_buffer callback: refill the source buffer from the
 * blob.  At EOF it synthesizes an EOI marker (per libjpeg convention) so
 * truncated streams decode with a warning instead of hanging.
 */
static boolean FillInputBuffer(j_decompress_ptr cinfo)
{
  SourceManager
    *source;

  source=(SourceManager *) cinfo->src;
  source->manager.bytes_in_buffer=(size_t) ReadBlob(source->image,
    MaxBufferExtent,source->buffer);
  if (source->manager.bytes_in_buffer == 0)
    {
      if (source->start_of_blob != FALSE)
        ERREXIT(cinfo,JERR_INPUT_EMPTY);
      WARNMS(cinfo,JWRN_JPEG_EOF);
      source->buffer[0]=(JOCTET) 0xff;
      source->buffer[1]=(JOCTET) JPEG_EOI;
      source->manager.bytes_in_buffer=2;
    }
  source->manager.next_input_byte=source->buffer;
  source->start_of_blob=FALSE;
  return(TRUE);
}

/*
 * Read one byte from the decompressor's input, refilling the buffer as
 * needed.  There is no out-of-band EOF: past end-of-stream this yields
 * the fake EOI bytes produced by FillInputBuffer.
 */
static int GetCharacter(j_decompress_ptr jpeg_info)
{
  if (jpeg_info->src->bytes_in_buffer == 0)
    (void) (*jpeg_info->src->fill_input_buffer)(jpeg_info);
  jpeg_info->src->bytes_in_buffer--;
  return((int) GETJOCTET(*jpeg_info->src->next_input_byte++));
}

/* libjpeg init_source callback: mark that no bytes were consumed yet. */
static void InitializeSource(j_decompress_ptr cinfo)
{
  SourceManager
    *source;

  source=(SourceManager *) cinfo->src;
  source->start_of_blob=TRUE;
}

/*
 * Return MagickTrue when the 8BIM profile starts with the "G3FAX"
 * signature, i.e. the image is an ITU fax image.
 */
static MagickBooleanType IsITUFaxImage(const Image *image)
{
  const StringInfo
    *profile;

  const unsigned char
    *datum;

  profile=GetImageProfile(image,"8bim");
  if (profile == (const StringInfo *) NULL)
    return(MagickFalse);
  if (GetStringInfoLength(profile) < 5)
    return(MagickFalse);
  datum=GetStringInfoDatum(profile);
  if ((datum[0] == 0x47) && (datum[1] == 0x33) && (datum[2] == 0x46) &&
      (datum[3] == 0x41) && (datum[4] == 0x58))
    return(MagickTrue);
  return(MagickFalse);
}

/*
 * libjpeg error_exit callback: record the message on the image's
 * exception (warning once decoding finished, error otherwise) and
 * longjmp back to the setjmp site in Read/WriteJPEGImage.
 */
static void JPEGErrorHandler(j_common_ptr jpeg_info)
{
  char
    message[JMSG_LENGTH_MAX];

  ErrorManager
    *error_manager;

  Image
    *image;

  *message='\0';
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  (jpeg_info->err->format_message)(jpeg_info,message);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "[%s] JPEG Trace: \"%s\"",image->filename,message);
  if (error_manager->finished != MagickFalse)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageWarning,(char *) message,"`%s'",image->filename);
  else
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageError,(char *) message,"`%s'",image->filename);
  longjmp(error_manager->error_recovery,1);
}

/*
 * libjpeg emit_message callback: negative levels are warnings (capped at
 * JPEGExcessiveWarnings), non-negative levels are trace messages logged
 * only when debugging.
 */
static MagickBooleanType JPEGWarningHandler(j_common_ptr jpeg_info,int level)
{
#define JPEGExcessiveWarnings  1000

  char
    message[JMSG_LENGTH_MAX];

  ErrorManager
    *error_manager;

  Image
    *image;

  *message='\0';
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  if (level < 0)
    {
      /*
        Process warning message.
      */
      (jpeg_info->err->format_message)(jpeg_info,message);
      if (jpeg_info->err->num_warnings++ < JPEGExcessiveWarnings)
        ThrowBinaryException(CorruptImageWarning,(char *) message,
          image->filename);
    }
  else
    if ((image->debug != MagickFalse) &&
        (level >= jpeg_info->err->trace_level))
      {
        /*
          Process trace message.
        */
        (jpeg_info->err->format_message)(jpeg_info,message);
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "[%s] JPEG Trace: \"%s\"",image->filename,message);
      }
  return(MagickTrue);
}

/*
 * COM marker handler: read the comment body (length-prefixed, big-endian
 * 16-bit length including the 2 length bytes) and store it as the
 * "comment" image property.
 */
static boolean ReadComment(j_decompress_ptr jpeg_info)
{
  ErrorManager
    *error_manager;

  Image
    *image;

  register unsigned char
    *p;

  register ssize_t
    i;

  size_t
    length;

  StringInfo
    *comment;

  /*
    Determine length of comment.
  */
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=GetCharacter(jpeg_info);
  if (length <= 2)
    return(TRUE);
  length-=2;
  comment=BlobToStringInfo((const void *) NULL,length);
  if (comment == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  /*
    Read comment.  error_manager->profile parks the allocation so a
    longjmp from the error handler can free it.
  */
  error_manager->profile=comment;
  p=GetStringInfoDatum(comment);
  for (i=0; i < (ssize_t) GetStringInfoLength(comment); i++)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  *p='\0';
  error_manager->profile=NULL;
  p=GetStringInfoDatum(comment);
  (void) SetImageProperty(image,"comment",(const char *) p);
  comment=DestroyStringInfo(comment);
  return(TRUE);
}

/*
 * APP2 marker handler: read an ICC_PROFILE chunk and append it to the
 * image's "icc" profile (ICC profiles may span several markers).
 */
static boolean ReadICCProfile(j_decompress_ptr jpeg_info)
{
  char
    magick[12];

  ErrorManager
    *error_manager;

  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  register unsigned char
    *p;

  size_t
    length;

  StringInfo
    *icc_profile,
    *profile;

  /*
    Read color profile.
  */
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=(size_t) GetCharacter(jpeg_info);
  length-=2;
  if (length <= 14)
    {
      while (length-- > 0)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  for (i=0; i < 12; i++)
    magick[i]=(char) GetCharacter(jpeg_info);
  if (LocaleCompare(magick,ICC_PROFILE) != 0)
    {
      /*
        Not a ICC profile, return.
      */
      for (i=0; i < (ssize_t) (length-12); i++)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  (void) GetCharacter(jpeg_info);  /* id */
  (void) GetCharacter(jpeg_info);  /* markers */
  length-=14;
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  profile=BlobToStringInfo((const void *) NULL,length);
  if (profile == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  error_manager->profile=profile;
  p=GetStringInfoDatum(profile);
  /* Loop counts down but writes forward; equivalent to a 0..length-1 loop. */
  for (i=(ssize_t) GetStringInfoLength(profile)-1; i >= 0; i--)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  error_manager->profile=NULL;
  icc_profile=(StringInfo *) GetImageProfile(image,"icc");
  if (icc_profile != (StringInfo *) NULL)
    {
      ConcatenateStringInfo(icc_profile,profile);
      profile=DestroyStringInfo(profile);
    }
  else
    {
      status=SetImageProfile(image,"icc",profile);
      profile=DestroyStringInfo(profile);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(&image->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(FALSE);
        }
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Profile: ICC, %.20g bytes",(double) length);
  return(TRUE);
}

/*
 * APP13 marker handler: read a Photoshop-resource-format IPTC chunk and
 * append it to the image's "8bim" profile.
 */
static boolean ReadIPTCProfile(j_decompress_ptr jpeg_info)
{
  char
    magick[MaxTextExtent];

  ErrorManager
    *error_manager;

  Image
    *image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  register unsigned char
    *p;

  size_t
    length;

  StringInfo
    *iptc_profile,
    *profile;

  /*
    Determine length of binary data stored here.
  */
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=(size_t) GetCharacter(jpeg_info);
  length-=2;
  if (length <= 14)
    {
      while (length-- > 0)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  /*
    Validate that this was written as a Photoshop resource format slug.
  */
  for (i=0; i < 10; i++)
    magick[i]=(char) GetCharacter(jpeg_info);
  magick[10]='\0';
  length-=10;
  if (length <= 10)
    return(TRUE);
  if (LocaleCompare(magick,"Photoshop ") != 0)
    {
      /*
        Not a IPTC profile, return.
      */
      for (i=0; i < (ssize_t) length; i++)
        (void) GetCharacter(jpeg_info);
      return(TRUE);
    }
  /*
    Remove the version number.
    NOTE(review): the length <= 11 guard runs before the length-=4
    adjustment for the 4 bytes just consumed - ordering looks
    suspicious; verify against upstream.
  */
  for (i=0; i < 4; i++)
    (void) GetCharacter(jpeg_info);
  if (length <= 11)
    return(TRUE);
  length-=4;
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  profile=BlobToStringInfo((const void *) NULL,length);
  if (profile == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  error_manager->profile=profile;
  p=GetStringInfoDatum(profile);
  for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  error_manager->profile=NULL;
  iptc_profile=(StringInfo *) GetImageProfile(image,"8bim");
  if (iptc_profile != (StringInfo *) NULL)
    {
      ConcatenateStringInfo(iptc_profile,profile);
      profile=DestroyStringInfo(profile);
    }
  else
    {
      status=SetImageProfile(image,"8bim",profile);
      profile=DestroyStringInfo(profile);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(&image->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(FALSE);
        }
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Profile: iptc, %.20g bytes",(double) length);
  return(TRUE);
}

/*
 * Generic APPn marker handler: store the payload as an "APPn" profile,
 * special-casing APP1 Exif and XMP payloads (the XMP namespace prefix
 * is stripped).  Repeated markers are concatenated, previous data first.
 */
static boolean ReadProfile(j_decompress_ptr jpeg_info)
{
  char
    name[MaxTextExtent];

  const StringInfo
    *previous_profile;

  ErrorManager
    *error_manager;

  Image
    *image;

  int
    marker;

  MagickBooleanType
    status;

  register ssize_t
    i;

  register unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile;

  /*
    Read generic profile.
  */
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=(size_t) GetCharacter(jpeg_info);
  if (length <= 2)
    return(TRUE);
  length-=2;
  marker=jpeg_info->unread_marker-JPEG_APP0;
  (void) FormatLocaleString(name,MaxTextExtent,"APP%d",marker);
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  profile=BlobToStringInfo((const void *) NULL,length);
  if (profile == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  error_manager->profile=profile;
  p=GetStringInfoDatum(profile);
  for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  error_manager->profile=NULL;
  if (marker == 1)
    {
      p=GetStringInfoDatum(profile);
      if ((length > 4) && (LocaleNCompare((char *) p,"exif",4) == 0))
        (void) CopyMagickString(name,"exif",MaxTextExtent);
      if ((length > 5) && (LocaleNCompare((char *) p,"http:",5) == 0))
        {
          ssize_t
            j;

          /*
            Extract namespace from XMP profile: drop everything through
            the first NUL byte.
          */
          p=GetStringInfoDatum(profile);
          for (j=0; j < (ssize_t) GetStringInfoLength(profile); j++)
          {
            if (*p == '\0')
              break;
            p++;
          }
          if (j < (ssize_t) GetStringInfoLength(profile))
            (void) DestroyStringInfo(SplitStringInfo(profile,(size_t) (j+1)));
          (void) CopyMagickString(name,"xmp",MaxTextExtent);
        }
    }
  previous_profile=GetImageProfile(image,name);
  if (previous_profile != (const StringInfo *) NULL)
    {
      size_t
        length;  /* NOTE(review): shadows the outer length on purpose? */

      /* Grow the new buffer, shift its bytes up, and copy the previous
         profile's bytes in front. */
      length=GetStringInfoLength(profile);
      SetStringInfoLength(profile,GetStringInfoLength(profile)+
        GetStringInfoLength(previous_profile));
      (void) memmove(GetStringInfoDatum(profile)+
        GetStringInfoLength(previous_profile),GetStringInfoDatum(profile),
        length);
      (void) memcpy(GetStringInfoDatum(profile),
        GetStringInfoDatum(previous_profile),
        GetStringInfoLength(previous_profile));
    }
  status=SetImageProfile(image,name,profile);
  profile=DestroyStringInfo(profile);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Profile: %s, %.20g bytes",name,(double) length);
  return(TRUE);
}

/* libjpeg skip_input_data callback: skip bytes, refilling as needed. */
static void SkipInputData(j_decompress_ptr cinfo,long number_bytes)
{
  SourceManager
    *source;

  if (number_bytes <= 0)
    return;
  source=(SourceManager *) cinfo->src;
  while (number_bytes > (long) source->manager.bytes_in_buffer)
  {
    number_bytes-=(long) source->manager.bytes_in_buffer;
    (void) FillInputBuffer(cinfo);
  }
  source->manager.next_input_byte+=number_bytes;
  source->manager.bytes_in_buffer-=number_bytes;
}

/* libjpeg term_source callback: nothing to release (pool-allocated). */
static void TerminateSource(j_decompress_ptr cinfo)
{
  (void) cinfo;
}

/*
 * Install a blob-backed source manager on the decompressor.  Both the
 * manager struct and its buffer are allocated from libjpeg's JPOOL_IMAGE
 * pool, so they are freed with the decompress struct.
 */
static void JPEGSourceManager(j_decompress_ptr cinfo,Image *image)
{
  SourceManager
    *source;

  cinfo->src=(struct jpeg_source_mgr *) (*cinfo->mem->alloc_small)
    ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(SourceManager));
  source=(SourceManager *) cinfo->src;
  source->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr)
    cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET));
  source=(SourceManager *) cinfo->src;
  source->manager.init_source=InitializeSource;
  source->manager.fill_input_buffer=FillInputBuffer;
  source->manager.skip_input_data=SkipInputData;
  source->manager.resync_to_restart=jpeg_resync_to_restart;
  source->manager.term_source=TerminateSource;
  source->manager.bytes_in_buffer=0;
  source->manager.next_input_byte=NULL;
  source->image=image;
}

/*
 * Estimate the original compression quality (1-100) from the decoded
 * quantization tables by matching selected quantizer values and table
 * sums against precomputed tables for the IJG reference quantizers.
 */
static void JPEGSetImageQuality(struct jpeg_decompress_struct *jpeg_info,
  Image *image)
{
  image->quality=UndefinedCompressionQuality;
#if defined(D_PROGRESSIVE_SUPPORTED)
  if (image->compression == LosslessJPEGCompression)
    {
      image->quality=100;
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "Quality: 100 (lossless)");
    }
  else
#endif
  {
    ssize_t
      j,
      qvalue,
      sum;

    register ssize_t
      i;

    /*
      Determine the JPEG compression quality from the quantization
      tables.
    */
    sum=0;
    for (i=0; i < NUM_QUANT_TBLS; i++)
    {
      if (jpeg_info->quant_tbl_ptrs[i] != NULL)
        for (j=0; j < DCTSIZE2; j++)
          sum+=jpeg_info->quant_tbl_ptrs[i]->quantval[j];
    }
    if ((jpeg_info->quant_tbl_ptrs[0] != NULL) &&
        (jpeg_info->quant_tbl_ptrs[1] != NULL))
      {
        /* Two tables present (luma + chroma): use the two-table lookup. */
        ssize_t
          hash[101] =
          {
            1020, 1015,  932,  848,  780,  735,  702,  679,  660,  645,
             632,  623,  613,  607,  600,  594,  589,  585,  581,  571,
             555,  542,  529,  514,  494,  474,  457,  439,  424,  410,
             397,  386,  373,  364,  351,  341,  334,  324,  317,  309,
             299,  294,  287,  279,  274,  267,  262,  257,  251,  247,
             243,  237,  232,  227,  222,  217,  213,  207,  202,  198,
             192,  188,  183,  177,  173,  168,  163,  157,  153,  148,
             143,  139,  132,  128,  125,  119,  115,  108,  104,   99,
              94,   90,   84,   79,   74,   70,   64,   59,   55,   49,
              45,   40,   34,   30,   25,   20,   15,   11,    6,    4,
               0
          },
          sums[101] =
          {
            32640, 32635, 32266, 31495, 30665, 29804, 29146, 28599, 28104,
            27670, 27225, 26725, 26210, 25716, 25240, 24789, 24373, 23946,
            23572, 22846, 21801, 20842, 19949, 19121, 18386, 17651, 16998,
            16349, 15800, 15247, 14783, 14321, 13859, 13535, 13081, 12702,
            12423, 12056, 11779, 11513, 11135, 10955, 10676, 10392, 10208,
             9928,  9747,  9564,  9369,  9193,  9017,  8822,  8639,  8458,
             8270,  8084,  7896,  7710,  7527,  7347,  7156,  6977,  6788,
             6607,  6422,  6236,  6054,  5867,  5684,  5495,  5305,  5128,
             4945,  4751,  4638,  4442,  4248,  4065,  3888,  3698,  3509,
             3326,  3139,  2957,  2775,  2586,  2405,  2216,  2037,  1846,
             1666,  1483,  1297,  1109,   927,   735,   554,   375,   201,
              128,     0
          };

        qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
          jpeg_info->quant_tbl_ptrs[0]->quantval[53]+
          jpeg_info->quant_tbl_ptrs[1]->quantval[0]+
          jpeg_info->quant_tbl_ptrs[1]->quantval[DCTSIZE2-1]);
        for (i=0; i < 100; i++)
        {
          if ((qvalue < hash[i]) && (sum < sums[i]))
            continue;
          if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
            image->quality=(size_t) i+1;
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
              (sum <= sums[i]) ? "exact" : "approximate");
          break;
        }
      }
    else
      if (jpeg_info->quant_tbl_ptrs[0] != NULL)
        {
          /* Single (grayscale) table: use the one-table lookup. */
          ssize_t
            hash[101] =
            {
              510,  505,  422,  380,  355,  338,  326,  318,  311,  305,
              300,  297,  293,  291,  288,  286,  284,  283,  281,  280,
              279,  278,  277,  273,  262,  251,  243,  233,  225,  218,
              211,  205,  198,  193,  186,  181,  177,  172,  168,  164,
              158,  156,  152,  148,  145,  142,  139,  136,  133,  131,
              129,  126,  123,  120,  118,  115,  113,  110,  107,  105,
              102,  100,   97,   94,   92,   89,   87,   83,   81,   79,
               76,   74,   70,   68,   66,   63,   61,   57,   55,   52,
               50,   48,   44,   42,   39,   37,   34,   31,   29,   26,
               24,   21,   18,   16,   13,   11,    8,    6,    3,    2,
                0
            },
            sums[101] =
            {
              16320, 16315, 15946, 15277, 14655, 14073, 13623, 13230, 12859,
              12560, 12240, 11861, 11456, 11081, 10714, 10360, 10027,  9679,
               9368,  9056,  8680,  8331,  7995,  7668,  7376,  7084,  6823,
               6562,  6345,  6125,  5939,  5756,  5571,  5421,  5240,  5086,
               4976,  4829,  4719,  4616,  4463,  4393,  4280,  4166,  4092,
               3980,  3909,  3835,  3755,  3688,  3621,  3541,  3467,  3396,
               3323,  3247,  3170,  3096,  3021,  2952,  2874,  2804,  2727,
               2657,  2583,  2509,  2437,  2362,  2290,  2211,  2136,  2068,
               1996,  1915,  1858,  1773,  1692,  1620,  1552,  1477,  1398,
               1326,  1251,  1179,  1109,  1031,   961,   884,   814,   736,
                667,   592,   518,   441,   369,   292,   221,   151,    86,
                 64,     0
            };

          qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
            jpeg_info->quant_tbl_ptrs[0]->quantval[53]);
          for (i=0; i < 100; i++)
          {
            if ((qvalue < hash[i]) && (sum < sums[i]))
              continue;
            if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
              image->quality=(size_t) i+1;
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
                (sum <= sums[i]) ? "exact" : "approximate");
            break;
          }
        }
  }
}

/*
 * Record the decoder's chroma subsampling factors as the
 * "jpeg:sampling-factor" property, formatted per colorspace.
 */
static void JPEGSetImageSamplingFactor(struct jpeg_decompress_struct *jpeg_info,
  Image *image)
{
  char
    sampling_factor[MaxTextExtent];

  switch (jpeg_info->out_color_space)
  {
    case JCS_CMYK:
    {
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: CMYK");
      (void) FormatLocaleString(sampling_factor,MaxTextExtent,
        "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
        jpeg_info->comp_info[0].v_samp_factor,
        jpeg_info->comp_info[1].h_samp_factor,
        jpeg_info->comp_info[1].v_samp_factor,
        jpeg_info->comp_info[2].h_samp_factor,
        jpeg_info->comp_info[2].v_samp_factor,
        jpeg_info->comp_info[3].h_samp_factor,
        jpeg_info->comp_info[3].v_samp_factor);
      break;
    }
    case JCS_GRAYSCALE:
    {
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "Colorspace: GRAYSCALE");
      (void) FormatLocaleString(sampling_factor,MaxTextExtent,"%dx%d",
        jpeg_info->comp_info[0].h_samp_factor,
        jpeg_info->comp_info[0].v_samp_factor);
      break;
    }
    case JCS_RGB:
    {
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: RGB");
      (void) FormatLocaleString(sampling_factor,MaxTextExtent,
        "%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
        jpeg_info->comp_info[0].v_samp_factor,
        jpeg_info->comp_info[1].h_samp_factor,
        jpeg_info->comp_info[1].v_samp_factor,
        jpeg_info->comp_info[2].h_samp_factor,
        jpeg_info->comp_info[2].v_samp_factor);
      break;
    }
    default:
    {
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
        jpeg_info->out_color_space);
      (void) FormatLocaleString(sampling_factor,MaxTextExtent,
        "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
        jpeg_info->comp_info[0].v_samp_factor,
        jpeg_info->comp_info[1].h_samp_factor,
        jpeg_info->comp_info[1].v_samp_factor,
        jpeg_info->comp_info[2].h_samp_factor,
        jpeg_info->comp_info[2].v_samp_factor,
        jpeg_info->comp_info[3].h_samp_factor,
        jpeg_info->comp_info[3].v_samp_factor);
      break;
    }
  }
  (void) SetImageProperty(image,"jpeg:sampling-factor",sampling_factor);
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Sampling Factors: %s",
    sampling_factor);
}

/*
 * Decode a JPEG stream into a new Image.
 * NOTE(review): this function continues past the visible end of the
 * chunk; only its setup portion is documented here.
 */
static Image *ReadJPEGImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    value[MaxTextExtent];

  const char
    *option;

  ErrorManager
    error_manager;

  Image
    *image;

  IndexPacket
    index;

  JSAMPLE
    *volatile jpeg_pixels;

  JSAMPROW
    scanline[1];

  MagickBooleanType
    debug,
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *memory_info;

  register ssize_t
    i;

  struct jpeg_decompress_struct
    jpeg_info;

  struct jpeg_error_mgr
    jpeg_error;

  register JSAMPLE
    *p;

  size_t
    units;

  ssize_t
    y;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  debug=IsEventLogging();
  (void) debug;
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Verify that file size large enough to contain a JPEG datastream.
  */
  if (GetBlobSize(image) < 107)
    ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
  /*
    Initialize JPEG parameters.
  */
  (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager));
  (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info));
  (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error));
  jpeg_info.err=jpeg_std_error(&jpeg_error);
  jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler;
  jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler;
  memory_info=(MemoryInfo *) NULL;
  error_manager.image=image;
  /* Error recovery: JPEGErrorHandler longjmps back here. */
  if (setjmp(error_manager.error_recovery) != 0)
    {
      jpeg_destroy_decompress(&jpeg_info);
      if (error_manager.profile != (StringInfo *) NULL)
        error_manager.profile=DestroyStringInfo(error_manager.profile);
      (void) CloseBlob(image);
      number_pixels=(MagickSizeType) image->columns*image->rows;
      if (number_pixels != 0)
        return(GetFirstImageInList(image));
      InheritException(exception,&image->exception);
      return(DestroyImage(image));
    }
  jpeg_info.client_data=(void *) &error_manager;
  jpeg_create_decompress(&jpeg_info);
  JPEGSourceManager(&jpeg_info,image);
  jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment);
  /* Marker handlers are installed unless disabled via "profile:skip". */
  option=GetImageOption(image_info,"profile:skip");
  if (IsOptionMember("ICC",option) == MagickFalse)
    jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile);
  if (IsOptionMember("IPTC",option) == MagickFalse)
    jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile);
  for (i=1; i < 16; i++)
    if ((i != 2) && (i != 13) && (i != 14))
      if (IsOptionMember("APP",option) == MagickFalse)
        jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile);
  i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE);
  if ((image_info->colorspace == YCbCrColorspace) ||
      (image_info->colorspace == Rec601YCbCrColorspace) ||
      (image_info->colorspace == Rec709YCbCrColorspace))
    jpeg_info.out_color_space=JCS_YCbCr;
  /*
    Set image resolution.
*/ units=0; if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) && (jpeg_info.Y_density != 1)) { image->x_resolution=(double) jpeg_info.X_density; image->y_resolution=(double) jpeg_info.Y_density; units=(size_t) jpeg_info.density_unit; } if (units == 1) image->units=PixelsPerInchResolution; if (units == 2) image->units=PixelsPerCentimeterResolution; number_pixels=(MagickSizeType) image->columns*image->rows; option=GetImageOption(image_info,"jpeg:size"); if ((option != (const char *) NULL) && (jpeg_info.out_color_space != JCS_YCbCr)) { double scale_factor; GeometryInfo geometry_info; MagickStatusType flags; /* Scale the image. */ flags=ParseGeometry(option,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_calc_output_dimensions(&jpeg_info); image->magick_columns=jpeg_info.output_width; image->magick_rows=jpeg_info.output_height; scale_factor=1.0; if (geometry_info.rho != 0.0) scale_factor=jpeg_info.output_width/geometry_info.rho; if ((geometry_info.sigma != 0.0) && (scale_factor > (jpeg_info.output_height/geometry_info.sigma))) scale_factor=jpeg_info.output_height/geometry_info.sigma; jpeg_info.scale_num=1U; jpeg_info.scale_denom=(unsigned int) scale_factor; jpeg_calc_output_dimensions(&jpeg_info); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Scale factor: %.20g",(double) scale_factor); } #if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED) #if defined(D_LOSSLESS_SUPPORTED) image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ? JPEGInterlace : NoInterlace; image->compression=jpeg_info.process == JPROC_LOSSLESS ? LosslessJPEGCompression : JPEGCompression; if (jpeg_info.data_precision > 8) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "12-bit JPEG not supported. 
Reducing pixel data to 8 bits","`%s'", image->filename); if (jpeg_info.data_precision == 16) jpeg_info.data_precision=12; #else image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace : NoInterlace; image->compression=JPEGCompression; #endif #else image->compression=JPEGCompression; image->interlace=JPEGInterlace; #endif option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) { /* Let the JPEG library quantize for us. */ jpeg_info.quantize_colors=TRUE; jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option); } option=GetImageOption(image_info,"jpeg:block-smoothing"); if (option != (const char *) NULL) jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE : FALSE; jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:fancy-upsampling"); if (option != (const char *) NULL) jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? 
TRUE : FALSE; (void) jpeg_start_decompress(&jpeg_info); image->columns=jpeg_info.output_width; image->rows=jpeg_info.output_height; image->depth=(size_t) jpeg_info.data_precision; switch (jpeg_info.out_color_space) { case JCS_RGB: default: { (void) SetImageColorspace(image,sRGBColorspace); break; } case JCS_GRAYSCALE: { (void) SetImageColorspace(image,GRAYColorspace); break; } case JCS_YCbCr: { (void) SetImageColorspace(image,YCbCrColorspace); break; } case JCS_CMYK: { (void) SetImageColorspace(image,CMYKColorspace); break; } } if (IsITUFaxImage(image) != MagickFalse) { (void) SetImageColorspace(image,LabColorspace); jpeg_info.out_color_space=JCS_YCbCr; } option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) if (AcquireImageColormap(image,StringToUnsignedLong(option)) == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0)) { size_t colors; colors=(size_t) GetQuantumRange(image->depth)+1; if (AcquireImageColormap(image,colors) == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } } if (image->debug != MagickFalse) { if (image->interlace != NoInterlace) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: progressive"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: nonprogressive"); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d", (int) jpeg_info.data_precision); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d", (int) jpeg_info.output_width,(int) jpeg_info.output_height); } JPEGSetImageQuality(&jpeg_info,image); JPEGSetImageSamplingFactor(&jpeg_info,image); (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double) jpeg_info.out_color_space); (void) SetImageProperty(image,"jpeg:colorspace",value); if (image_info->ping != MagickFalse) { jpeg_destroy_decompress(&jpeg_info); (void) 
CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { jpeg_destroy_decompress(&jpeg_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((jpeg_info.output_components != 1) && (jpeg_info.output_components != 3) && (jpeg_info.output_components != 4)) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(CorruptImageError,"ImageTypeNotSupported"); } memory_info=AcquireVirtualMemory((size_t) image->columns, jpeg_info.output_components*sizeof(*jpeg_pixels)); if (memory_info == (MemoryInfo *) NULL) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info); /* Convert JPEG pixels to pixel packets. */ if (setjmp(error_manager.error_recovery) != 0) { if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); return(DestroyImage(image)); } if (jpeg_info.quantize_colors != 0) { image->colors=(size_t) jpeg_info.actual_number_of_colors; if (jpeg_info.out_color_space == JCS_GRAYSCALE) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=image->colormap[i].red; image->colormap[i].blue=image->colormap[i].red; image->colormap[i].opacity=OpaqueOpacity; } else for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=ScaleCharToQuantum(jpeg_info.colormap[1][i]); image->colormap[i].blue=ScaleCharToQuantum(jpeg_info.colormap[2][i]); image->colormap[i].opacity=OpaqueOpacity; } } scanline[0]=(JSAMPROW) jpeg_pixels; for (y=0; y < (ssize_t) image->rows; y++) { register 
IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1) { (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename); continue; } p=jpeg_pixels; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (jpeg_info.data_precision > 8) { unsigned short scale; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { size_t pixel; pixel=(size_t) (scale*GETJSAMPLE(*p)); index=ConstrainColormapIndex(image,pixel); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelMagenta(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelYellow(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlack(indexes+x,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } } else if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,(size_t) GETJSAMPLE(*p)); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != 
CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelMagenta(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelYellow(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlack(indexes+x,QuantumRange-ScaleCharToQuantum( (unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) { jpeg_abort_decompress(&jpeg_info); break; } } if (status != MagickFalse) { error_manager.finished=MagickTrue; if (setjmp(error_manager.error_recovery) == 0) (void) jpeg_finish_decompress(&jpeg_info); } /* Free jpeg resources. */ jpeg_destroy_decompress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterJPEGImage() adds properties for the JPEG image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
%
%  The format of the RegisterJPEGImage method is:
%
%      size_t RegisterJPEGImage(void)
%
*/
ModuleExport size_t RegisterJPEGImage(void)
{
  char
    version[MaxTextExtent];

  MagickInfo
    *entry;

  static const char
    description[] = "Joint Photographic Experts Group JFIF format";

  /* Record the libjpeg library version, when the build exposes it. */
  *version='\0';
#if defined(JPEG_LIB_VERSION)
  (void) FormatLocaleString(version,MaxTextExtent,"%d",JPEG_LIB_VERSION);
#endif
  /*
    "JPE" entry.  libjpeg < 8 (non-turbo) is not thread safe, hence the
    NoThreadSupport guard repeated for every entry below.
  */
  entry=SetMagickInfo("JPE");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
  entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
  entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
  entry->magick=(IsImageFormatHandler *) IsJPEG;
  entry->adjoin=MagickFalse;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString(description);
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  /* "JPEG" entry (primary format tag). */
  entry=SetMagickInfo("JPEG");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
  entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
  entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
  entry->magick=(IsImageFormatHandler *) IsJPEG;
  entry->adjoin=MagickFalse;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString(description);
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  /*
    "JPG" entry.  NOTE(review): unlike JPE/JPEG above, this entry (and
    JPS/PJPEG below) does not set entry->magick -- possibly intentional
    since IsJPEG is already registered under JPE/JPEG; confirm.
  */
  entry=SetMagickInfo("JPG");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
  entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
  entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
  entry->adjoin=MagickFalse;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString(description);
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  /* "JPS" entry (stereo JPEG). */
  entry=SetMagickInfo("JPS");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
  entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
  entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
  entry->adjoin=MagickFalse;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString(description);
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  /* "PJPEG" entry (progressive JPEG). */
  entry=SetMagickInfo("PJPEG");
#if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION)
  entry->thread_support=NoThreadSupport;
#endif
#if defined(MAGICKCORE_JPEG_DELEGATE)
  entry->decoder=(DecodeImageHandler *) ReadJPEGImage;
  entry->encoder=(EncodeImageHandler *) WriteJPEGImage;
#endif
  entry->adjoin=MagickFalse;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString(description);
  if (*version != '\0')
    entry->version=ConstantString(version);
  entry->mime_type=ConstantString("image/jpeg");
  entry->module=ConstantString("JPEG");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r J P E G I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterJPEGImage() removes format registrations made by the
%  JPEG module from the list of supported formats.
% % The format of the UnregisterJPEGImage method is: % % UnregisterJPEGImage(void) % */ ModuleExport void UnregisterJPEGImage(void) { (void) UnregisterMagickInfo("PJPG"); (void) UnregisterMagickInfo("JPS"); (void) UnregisterMagickInfo("JPG"); (void) UnregisterMagickInfo("JPG"); (void) UnregisterMagickInfo("JPEG"); (void) UnregisterMagickInfo("JPE"); } #if defined(MAGICKCORE_JPEG_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteJPEGImage() writes a JPEG image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the WriteJPEGImage method is: % % MagickBooleanType WriteJPEGImage(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o jpeg_image: The image. 
%
%
*/

/*
  DestroyQuantizationTable() releases all memory associated with a
  QuantizationTable: the slot name, the description string, the level
  array, and finally the table structure itself.  Always returns NULL so
  callers can write `table=DestroyQuantizationTable(table);`.
*/
static QuantizationTable *DestroyQuantizationTable(QuantizationTable *table)
{
  assert(table != (QuantizationTable *) NULL);
  if (table->slot != (char *) NULL)
    table->slot=DestroyString(table->slot);
  if (table->description != (char *) NULL)
    table->description=DestroyString(table->description);
  if (table->levels != (unsigned int *) NULL)
    table->levels=(unsigned int *) RelinquishMagickMemory(table->levels);
  table=(QuantizationTable *) RelinquishMagickMemory(table);
  return(table);
}

/*
  EmptyOutputBuffer() is the libjpeg destination-manager callback invoked
  when the output buffer is full: it flushes the entire buffer to the
  image blob and resets the write pointer.  A short write raises
  JERR_FILE_WRITE via ERREXIT(), which longjmp()s to the coder's error
  handler.
*/
static boolean EmptyOutputBuffer(j_compress_ptr cinfo)
{
  DestinationManager
    *destination;

  destination=(DestinationManager *) cinfo->dest;
  destination->manager.free_in_buffer=(size_t) WriteBlob(destination->image,
    MaxBufferExtent,destination->buffer);
  if (destination->manager.free_in_buffer != MaxBufferExtent)
    ERREXIT(cinfo,JERR_FILE_WRITE);
  /* Present the buffer to libjpeg as completely empty again. */
  destination->manager.next_output_byte=destination->buffer;
  return(TRUE);
}

/*
  GetQuantizationTable() loads a custom JPEG quantization table from the
  XML file `filename`, selecting the <table> element whose "slot" (or
  "alias") attribute matches `slot`.  Returns NULL when the file cannot
  be read or no matching table exists.  On success the caller owns the
  returned table and must free it with DestroyQuantizationTable().
*/
static QuantizationTable *GetQuantizationTable(const char *filename,
  const char *slot,ExceptionInfo *exception)
{
  char
    *p,
    *xml;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  QuantizationTable
    *table;

  size_t
    length;

  ssize_t
    j;

  XMLTreeInfo
    *description,
    *levels,
    *quantization_tables,
    *table_iterator;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading quantization tables \"%s\" ...",filename);
  table=(QuantizationTable *) NULL;
  /* ~0UL: no length cap on the XML file read. */
  xml=FileToString(filename,~0UL,exception);
  if (xml == (char *) NULL)
    return(table);
  quantization_tables=NewXMLTree(xml,exception);
  if (quantization_tables == (XMLTreeInfo *) NULL)
    {
      xml=DestroyString(xml);
      return(table);
    }
  /* Search the <table> children for a matching slot or alias attribute. */
  for (table_iterator=GetXMLTreeChild(quantization_tables,"table");
       table_iterator != (XMLTreeInfo *) NULL;
       table_iterator=GetNextXMLTreeTag(table_iterator))
  {
    attribute=GetXMLTreeAttribute(table_iterator,"slot");
    if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(table_iterator,"alias");
    if ((attribute != (char *) NULL) &&
(LocaleCompare(slot,attribute) == 0)) break; } if (table_iterator == (XMLTreeInfo *) NULL) { xml=DestroyString(xml); return(table); } description=GetXMLTreeChild(table_iterator,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<description>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } levels=GetXMLTreeChild(table_iterator,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<levels>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } table=(QuantizationTable *) AcquireMagickMemory(sizeof(*table)); if (table == (QuantizationTable *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAcquireQuantizationTable"); table->slot=(char *) NULL; table->description=(char *) NULL; table->levels=(unsigned int *) NULL; attribute=GetXMLTreeAttribute(table_iterator,"slot"); if (attribute != (char *) NULL) table->slot=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) table->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels width>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->width=StringToUnsignedLong(attribute); if (table->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels width>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } 
attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->height=StringToUnsignedLong(attribute); if (table->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->divisor=InterpretLocaleValue(attribute,(char **) NULL); if (table->divisor == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent","<levels>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } length=(size_t) table->width*table->height; if (length < 64) length=64; table->levels=(unsigned int *) AcquireQuantumMemory(length, sizeof(*table->levels)); if (table->levels == (unsigned int *) NULL) ThrowFatalException(ResourceLimitFatalError, 
"UnableToAcquireQuantizationTable"); for (i=0; i < (ssize_t) (table->width*table->height); i++) { table->levels[i]=(unsigned int) (InterpretLocaleValue(content,&p)/ table->divisor+0.5); while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; content=p; } value=InterpretLocaleValue(content,&p); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent","<level> too many values, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } for (j=i; j < 64; j++) table->levels[j]=table->levels[j-1]; quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } static void InitializeDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; destination->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET)); destination->manager.next_output_byte=destination->buffer; destination->manager.free_in_buffer=MaxBufferExtent; } static void TerminateDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; if ((MaxBufferExtent-(int) destination->manager.free_in_buffer) > 0) { ssize_t count; count=WriteBlob(destination->image,MaxBufferExtent- destination->manager.free_in_buffer,destination->buffer); if (count != (ssize_t) (MaxBufferExtent-destination->manager.free_in_buffer)) ERREXIT(cinfo,JERR_FILE_WRITE); } } static void WriteProfile(j_compress_ptr jpeg_info,Image *image) { const char *name; const StringInfo *profile; MagickBooleanType iptc; register ssize_t i; size_t length, tag_length; StringInfo *custom_profile; /* Save image profile as a APP marker. 
*/ iptc=MagickFalse; custom_profile=AcquireStringInfo(65535L); ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { register unsigned char *p; profile=GetImageProfile(image,name); p=GetStringInfoDatum(custom_profile); if (LocaleCompare(name,"EXIF") == 0) { length=GetStringInfoLength(profile); if (length > 65533L) { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderWarning,"ExifProfileSizeExceedsLimit","`%s'", image->filename); length=65533L; } jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile), (unsigned int) length); } if (LocaleCompare(name,"ICC") == 0) { register unsigned char *p; tag_length=strlen(ICC_PROFILE); p=GetStringInfoDatum(custom_profile); (void) CopyMagickMemory(p,ICC_PROFILE,tag_length); p[tag_length]='\0'; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L) { length=MagickMin(GetStringInfoLength(profile)-i,65519L); p[12]=(unsigned char) ((i/65519L)+1); p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1); (void) CopyMagickMemory(p+tag_length+3,GetStringInfoDatum(profile)+i, length); jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum( custom_profile),(unsigned int) (length+tag_length+3)); } } if (((LocaleCompare(name,"IPTC") == 0) || (LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse)) { size_t roundup; iptc=MagickTrue; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L) { length=MagickMin(GetStringInfoLength(profile)-i,65500L); roundup=(size_t) (length & 0x01); if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0) { (void) memcpy(p,"Photoshop 3.0 ",14); tag_length=14; } else { (void) CopyMagickMemory(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24); tag_length=26; p[24]=(unsigned char) (length >> 8); p[25]=(unsigned char) (length & 0xff); } p[13]=0x00; (void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length); if (roundup != 0) p[length+tag_length]='\0'; 
        jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum(
          custom_profile),(unsigned int) (length+tag_length+roundup));
      }
    }
    if (LocaleCompare(name,"XMP") == 0)
      {
        StringInfo
          *xmp_profile;

        /*
          Add namespace to XMP profile.
        */
        xmp_profile=StringToStringInfo("http://ns.adobe.com/xap/1.0/ ");
        if (xmp_profile != (StringInfo *) NULL)
          {
            if (profile != (StringInfo *) NULL)
              ConcatenateStringInfo(xmp_profile,profile);
            /* NUL-terminate the namespace prefix (byte 28 is the
               trailing space of the URI string above). */
            GetStringInfoDatum(xmp_profile)[28]='\0';
            /* Split into APP1 chunks of at most 65533 bytes (the JPEG
               marker payload limit). */
            for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L)
            {
              length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L);
              jpeg_write_marker(jpeg_info,XML_MARKER,
                GetStringInfoDatum(xmp_profile)+i,(unsigned int) length);
            }
            xmp_profile=DestroyStringInfo(xmp_profile);
          }
      }
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile));
    name=GetNextImageProfile(image);
  }
  custom_profile=DestroyStringInfo(custom_profile);
}

/*
  JPEGDestinationManager() installs a custom libjpeg destination manager
  that writes compressed bytes to the given image blob.  The manager
  struct is allocated from the libjpeg JPOOL_IMAGE pool, so libjpeg
  frees it when the compress struct is destroyed.
*/
static void JPEGDestinationManager(j_compress_ptr cinfo,Image * image)
{
  DestinationManager
    *destination;

  cinfo->dest=(struct jpeg_destination_mgr *) (*cinfo->mem->alloc_small)
    ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(DestinationManager));
  destination=(DestinationManager *) cinfo->dest;
  destination->manager.init_destination=InitializeDestination;
  destination->manager.empty_output_buffer=EmptyOutputBuffer;
  destination->manager.term_destination=TerminateDestination;
  destination->image=image;
}

/*
  SamplingFactorToList() splits a comma-separated sampling-factor string
  (e.g. "2x2,1x1,1x1") into an array of MAX_COMPONENTS strings; unused
  trailing entries default to "1x1".  Returns NULL when text is NULL.
  NOTE(review): the returned array and each entry are heap-allocated;
  presumably the caller frees them -- confirm at the call site.
*/
static char **SamplingFactorToList(const char *text)
{
  char
    **textlist;

  register char
    *q;

  register const char
    *p;

  register ssize_t
    i;

  if (text == (char *) NULL)
    return((char **) NULL);
  /*
    Convert string to an ASCII list.
*/ textlist=(char **) AcquireQuantumMemory((size_t) MAX_COMPONENTS, sizeof(*textlist)); if (textlist == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); p=text; for (i=0; i < (ssize_t) MAX_COMPONENTS; i++) { for (q=(char *) p; *q != '\0'; q++) if (*q == ',') break; textlist[i]=(char *) AcquireQuantumMemory((size_t) (q-p)+MaxTextExtent, sizeof(*textlist[i])); if (textlist[i] == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); (void) CopyMagickString(textlist[i],p,(size_t) (q-p+1)); if (*q == '\r') q++; if (*q == '\0') break; p=q+1; } for (i++; i < (ssize_t) MAX_COMPONENTS; i++) textlist[i]=ConstantString("1x1"); return(textlist); } static MagickBooleanType WriteJPEGImage(const ImageInfo *image_info, Image *image) { const char *option, *sampling_factor, *value; ErrorManager error_manager; ExceptionInfo *exception; Image *volatile volatile_image; int colorspace, quality; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType status; MemoryInfo *memory_info; register JSAMPLE *q; register ssize_t i; ssize_t y; struct jpeg_compress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; unsigned short scale; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); if ((LocaleCompare(image_info->magick,"JPS") == 0) && (image->next != (Image *) NULL)) image=AppendImages(image,MagickFalse,exception); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); /* Initialize JPEG parameters. 
*/
  (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager));
  (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info));
  (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error));
  volatile_image=image;
  jpeg_info.client_data=(void *) volatile_image;
  jpeg_info.err=jpeg_std_error(&jpeg_error);
  jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler;
  jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler;
  error_manager.image=volatile_image;
  memory_info=(MemoryInfo *) NULL;
  /* libjpeg reports fatal errors via longjmp back here; release the
     compression object and blob before bailing out. */
  if (setjmp(error_manager.error_recovery) != 0)
    {
      jpeg_destroy_compress(&jpeg_info);
      (void) CloseBlob(volatile_image);
      return(MagickFalse);
    }
  jpeg_info.client_data=(void *) &error_manager;
  jpeg_create_compress(&jpeg_info);
  JPEGDestinationManager(&jpeg_info,image);
  /* libjpeg dimensions are unsigned int; reject images that do not fit. */
  if ((image->columns != (unsigned int) image->columns) ||
      (image->rows != (unsigned int) image->rows))
    ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
  jpeg_info.image_width=(unsigned int) image->columns;
  jpeg_info.image_height=(unsigned int) image->rows;
  jpeg_info.input_components=3;
  jpeg_info.data_precision=8;
  jpeg_info.in_color_space=JCS_RGB;
  switch (image->colorspace)
  {
    case CMYKColorspace:
    {
      jpeg_info.input_components=4;
      jpeg_info.in_color_space=JCS_CMYK;
      break;
    }
    case YCbCrColorspace:
    case Rec601YCbCrColorspace:
    case Rec709YCbCrColorspace:
    {
      jpeg_info.in_color_space=JCS_YCbCr;
      break;
    }
    case GRAYColorspace:
    case Rec601LumaColorspace:
    case Rec709LumaColorspace:
    {
      if (image_info->type == TrueColorType)
        break;
      jpeg_info.input_components=1;
      jpeg_info.in_color_space=JCS_GRAYSCALE;
      break;
    }
    default:
    {
      (void) TransformImageColorspace(image,sRGBColorspace);
      if (image_info->type == TrueColorType)
        break;
      /* Encode as single-channel grayscale when all pixels are gray. */
      if (SetImageGray(image,&image->exception) != MagickFalse)
        {
          jpeg_info.input_components=1;
          jpeg_info.in_color_space=JCS_GRAYSCALE;
        }
      break;
    }
  }
  jpeg_set_defaults(&jpeg_info);
  if (jpeg_info.in_color_space == JCS_CMYK)
    jpeg_set_colorspace(&jpeg_info,JCS_YCCK);
  if ((jpeg_info.data_precision != 12) && (image->depth <= 8))
    jpeg_info.data_precision=8;
  else
    jpeg_info.data_precision=BITS_IN_JSAMPLE;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Image resolution: %.20g,%.20g",image->x_resolution,image->y_resolution);
  if ((image->x_resolution != 0.0) && (image->y_resolution != 0.0))
    {
      /*
        Set image resolution.
      */
      jpeg_info.write_JFIF_header=TRUE;
      jpeg_info.X_density=(UINT16) image->x_resolution;
      jpeg_info.Y_density=(UINT16) image->y_resolution;
      /*
        Set image resolution units.
      */
      if (image->units == PixelsPerInchResolution)
        jpeg_info.density_unit=(UINT8) 1;
      if (image->units == PixelsPerCentimeterResolution)
        jpeg_info.density_unit=(UINT8) 2;
    }
  jpeg_info.dct_method=JDCT_FLOAT;
  option=GetImageOption(image_info,"jpeg:dct-method");
  if (option != (const char *) NULL)
    switch (*option)
    {
      case 'D':
      case 'd':
      {
        if (LocaleCompare(option,"default") == 0)
          jpeg_info.dct_method=JDCT_DEFAULT;
        break;
      }
      case 'F':
      case 'f':
      {
        if (LocaleCompare(option,"fastest") == 0)
          jpeg_info.dct_method=JDCT_FASTEST;
        if (LocaleCompare(option,"float") == 0)
          jpeg_info.dct_method=JDCT_FLOAT;
        break;
      }
      case 'I':
      case 'i':
      {
        if (LocaleCompare(option,"ifast") == 0)
          jpeg_info.dct_method=JDCT_IFAST;
        if (LocaleCompare(option,"islow") == 0)
          jpeg_info.dct_method=JDCT_ISLOW;
        break;
      }
    }
  option=GetImageOption(image_info,"jpeg:optimize-coding");
  if (option != (const char *) NULL)
    jpeg_info.optimize_coding=IsStringTrue(option) != MagickFalse ? TRUE :
      FALSE;
  else
    {
      MagickSizeType
        length;

      length=(MagickSizeType) jpeg_info.input_components*image->columns*
        image->rows*sizeof(JSAMPLE);
      if (length == (MagickSizeType) ((size_t) length))
        {
          /*
            Perform optimization only if available memory resources permit it.
          */
          status=AcquireMagickResource(MemoryResource,length);
          RelinquishMagickResource(MemoryResource,length);
          jpeg_info.optimize_coding=status == MagickFalse ? FALSE : TRUE;
        }
    }
#if (JPEG_LIB_VERSION >= 61) && defined(C_PROGRESSIVE_SUPPORTED)
  if ((LocaleCompare(image_info->magick,"PJPEG") == 0) ||
      (image_info->interlace != NoInterlace))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Interlace: progressive");
      jpeg_simple_progression(&jpeg_info);
    }
  else
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "Interlace: non-progressive");
#else
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Interlace: nonprogressive");
#endif
  quality=92;
  if ((image_info->compression != LosslessJPEGCompression) &&
      (image->quality <= 100))
    {
      if (image->quality != UndefinedCompressionQuality)
        quality=(int) image->quality;
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: %.20g",
          (double) image->quality);
    }
  else
    {
#if !defined(C_LOSSLESS_SUPPORTED)
      quality=100;
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: 100");
#else
      if (image->quality < 100)
        (void) ThrowMagickException(&image->exception,GetMagickModule(),
          CoderWarning,"LosslessToLossyJPEGConversion","`%s'",image->filename);
      else
        {
          int
            point_transform,
            predictor;

          predictor=image->quality/100;  /* range 1-7 */
          point_transform=image->quality % 20;  /* range 0-15 */
          jpeg_simple_lossless(&jpeg_info,predictor,point_transform);
          if (image->debug != MagickFalse)
            {
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "Compression: lossless");
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "Predictor: %d",predictor);
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "Point Transform: %d",point_transform);
            }
        }
#endif
    }
  option=GetImageOption(image_info,"jpeg:extent");
  if (option != (const char *) NULL)
    {
      Image
        *jpeg_image;

      ImageInfo
        *jpeg_info;

      jpeg_info=CloneImageInfo(image_info);
      jpeg_info->blob=NULL;
      jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception);
      if (jpeg_image != (Image *) NULL)
        {
          MagickSizeType
            extent;

          size_t
            maximum,
            minimum;

          /*
            Search for compression quality that does not exceed image extent.
          */
          /* Binary search over quality: re-encode trial images to a temp
             file until the output size fits the requested extent. */
          jpeg_image->quality=0;
          extent=(MagickSizeType) SiPrefixToDoubleInterval(option,100.0);
          (void) DeleteImageOption(jpeg_info,"jpeg:extent");
          (void) DeleteImageArtifact(jpeg_image,"jpeg:extent");
          maximum=image_info->quality;
          if (maximum < 2)
            maximum=101;
          for (minimum=2; minimum < maximum; )
          {
            (void) AcquireUniqueFilename(jpeg_image->filename);
            jpeg_image->quality=minimum+(maximum-minimum+1)/2;
            (void) WriteJPEGImage(jpeg_info,jpeg_image);
            if (GetBlobSize(jpeg_image) <= extent)
              minimum=jpeg_image->quality+1;
            else
              maximum=jpeg_image->quality-1;
            (void) RelinquishUniqueFileResource(jpeg_image->filename);
          }
          quality=(int) minimum-1;
          jpeg_image=DestroyImage(jpeg_image);
        }
      jpeg_info=DestroyImageInfo(jpeg_info);
    }
  jpeg_set_quality(&jpeg_info,quality,TRUE);
#if (JPEG_LIB_VERSION >= 70)
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      int
        flags;

      /*
        Set quality scaling for luminance and chrominance separately.
      */
      flags=ParseGeometry(option,&geometry_info);
      if (((flags & RhoValue) != 0) && ((flags & SigmaValue) != 0))
        {
          jpeg_info.q_scale_factor[0]=jpeg_quality_scaling((int)
            (geometry_info.rho+0.5));
          jpeg_info.q_scale_factor[1]=jpeg_quality_scaling((int)
            (geometry_info.sigma+0.5));
          jpeg_default_qtables(&jpeg_info,TRUE);
        }
    }
#endif
  colorspace=jpeg_info.in_color_space;
  value=GetImageOption(image_info,"jpeg:colorspace");
  if (value == (char *) NULL)
    value=GetImageProperty(image,"jpeg:colorspace");
  if (value != (char *) NULL)
    colorspace=StringToInteger(value);
  sampling_factor=(const char *) NULL;
  /* Reuse the source's sampling factors only when we are re-encoding in
     the same colorspace as the input. */
  if (colorspace == jpeg_info.in_color_space)
    {
      value=GetImageOption(image_info,"jpeg:sampling-factor");
      if (value == (char *) NULL)
        value=GetImageProperty(image,"jpeg:sampling-factor");
      if (value != (char *) NULL)
        {
          sampling_factor=value;
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " Input sampling-factors=%s",sampling_factor);
        }
    }
  if (image_info->sampling_factor != (char *) NULL)
    sampling_factor=image_info->sampling_factor;
  if (sampling_factor == (const char *) NULL)
    {
      /* At high quality, disable chroma subsampling (1x1 for all). */
      if (quality >= 90)
        for (i=0; i < MAX_COMPONENTS; i++)
        {
          jpeg_info.comp_info[i].h_samp_factor=1;
          jpeg_info.comp_info[i].v_samp_factor=1;
        }
    }
  else
    {
      char
        **factors;

      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      /*
        Set sampling factor.
      */
      i=0;
      factors=SamplingFactorToList(sampling_factor);
      if (factors != (char **) NULL)
        {
          for (i=0; i < MAX_COMPONENTS; i++)
          {
            if (factors[i] == (char *) NULL)
              break;
            flags=ParseGeometry(factors[i],&geometry_info);
            if ((flags & SigmaValue) == 0)
              geometry_info.sigma=geometry_info.rho;
            jpeg_info.comp_info[i].h_samp_factor=(int) geometry_info.rho;
            jpeg_info.comp_info[i].v_samp_factor=(int) geometry_info.sigma;
            factors[i]=(char *) RelinquishMagickMemory(factors[i]);
          }
          factors=(char **) RelinquishMagickMemory(factors);
        }
      for ( ; i < MAX_COMPONENTS; i++)
      {
        jpeg_info.comp_info[i].h_samp_factor=1;
        jpeg_info.comp_info[i].v_samp_factor=1;
      }
    }
  option=GetImageOption(image_info,"jpeg:q-table");
  if (option != (const char *) NULL)
    {
      QuantizationTable
        *table;

      /*
        Custom quantization tables.
      */
      table=GetQuantizationTable(option,"0",&image->exception);
      if (table != (QuantizationTable *) NULL)
        {
          for (i=0; i < MAX_COMPONENTS; i++)
            jpeg_info.comp_info[i].quant_tbl_no=0;
          jpeg_add_quant_table(&jpeg_info,0,table->levels,
            jpeg_quality_scaling(quality),0);
          table=DestroyQuantizationTable(table);
        }
      table=GetQuantizationTable(option,"1",&image->exception);
      if (table != (QuantizationTable *) NULL)
        {
          for (i=1; i < MAX_COMPONENTS; i++)
            jpeg_info.comp_info[i].quant_tbl_no=1;
          jpeg_add_quant_table(&jpeg_info,1,table->levels,
            jpeg_quality_scaling(quality),0);
          table=DestroyQuantizationTable(table);
        }
      table=GetQuantizationTable(option,"2",&image->exception);
      if (table != (QuantizationTable *) NULL)
        {
          for (i=2; i < MAX_COMPONENTS; i++)
            jpeg_info.comp_info[i].quant_tbl_no=2;
          jpeg_add_quant_table(&jpeg_info,2,table->levels,
            jpeg_quality_scaling(quality),0);
          table=DestroyQuantizationTable(table);
        }
      table=GetQuantizationTable(option,"3",&image->exception);
      if (table != (QuantizationTable *) NULL)
        {
          for (i=3; i < MAX_COMPONENTS; i++)
            jpeg_info.comp_info[i].quant_tbl_no=3;
          jpeg_add_quant_table(&jpeg_info,3,table->levels,
            jpeg_quality_scaling(quality),0);
          table=DestroyQuantizationTable(table);
        }
    }
  jpeg_start_compress(&jpeg_info,TRUE);
  if (image->debug != MagickFalse)
    {
      if (image->storage_class == PseudoClass)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Storage class: PseudoClass");
      else
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Storage class: DirectClass");
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Depth: %.20g",
        (double) image->depth);
      if (image->colors != 0)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Number of colors: %.20g",(double) image->colors);
      else
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "Number of colors: unspecified");
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "JPEG data precision: %d",(int) jpeg_info.data_precision);
      switch (image->colorspace)
      {
        case CMYKColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Storage class: DirectClass");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: CMYK");
          break;
        }
        case YCbCrColorspace:
        case Rec601YCbCrColorspace:
        case Rec709YCbCrColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: YCbCr");
          break;
        }
        default:
          break;
      }
      switch (image->colorspace)
      {
        case CMYKColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: CMYK");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
            jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor,
            jpeg_info.comp_info[1].h_samp_factor,
            jpeg_info.comp_info[1].v_samp_factor,
            jpeg_info.comp_info[2].h_samp_factor,
            jpeg_info.comp_info[2].v_samp_factor,
            jpeg_info.comp_info[3].h_samp_factor,
            jpeg_info.comp_info[3].v_samp_factor);
          break;
        }
        case GRAYColorspace:
        case Rec601LumaColorspace:
        case Rec709LumaColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: GRAY");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d",jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor);
          break;
        }
        case sRGBColorspace:
        case RGBColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Image colorspace is RGB");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d,%dx%d,%dx%d",
            jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor,
            jpeg_info.comp_info[1].h_samp_factor,
            jpeg_info.comp_info[1].v_samp_factor,
            jpeg_info.comp_info[2].h_samp_factor,
            jpeg_info.comp_info[2].v_samp_factor);
          break;
        }
        case YCbCrColorspace:
        case Rec601YCbCrColorspace:
        case Rec709YCbCrColorspace:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Colorspace: YCbCr");
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d,%dx%d,%dx%d",
            jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor,
            jpeg_info.comp_info[1].h_samp_factor,
            jpeg_info.comp_info[1].v_samp_factor,
            jpeg_info.comp_info[2].h_samp_factor,
            jpeg_info.comp_info[2].v_samp_factor);
          break;
        }
        default:
        {
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
            image->colorspace);
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d",
            jpeg_info.comp_info[0].h_samp_factor,
            jpeg_info.comp_info[0].v_samp_factor,
            jpeg_info.comp_info[1].h_samp_factor,
            jpeg_info.comp_info[1].v_samp_factor,
            jpeg_info.comp_info[2].h_samp_factor,
            jpeg_info.comp_info[2].v_samp_factor,
            jpeg_info.comp_info[3].h_samp_factor,
            jpeg_info.comp_info[3].v_samp_factor);
          break;
        }
      }
    }
  /*
    Write JPEG profiles.
  */
  value=GetImageProperty(image,"comment");
  if (value != (char *) NULL)
    for (i=0; i < (ssize_t) strlen(value); i+=65533L)
      jpeg_write_marker(&jpeg_info,JPEG_COM,(unsigned char *) value+i,
        (unsigned int) MagickMin((size_t) strlen(value+i),65533L));
  if (image->profiles != (void *) NULL)
    WriteProfile(&jpeg_info,image);
  /*
    Convert MIFF to JPEG raster pixels.
  */
  memory_info=AcquireVirtualMemory((size_t) image->columns,
    jpeg_info.input_components*sizeof(*jpeg_pixels));
  if (memory_info == (MemoryInfo *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info);
  /* Re-arm the recovery point now that the scanline buffer exists, so a
     libjpeg failure during encoding also frees the pixel memory. */
  if (setjmp(error_manager.error_recovery) != 0)
    {
      jpeg_destroy_compress(&jpeg_info);
      if (memory_info != (MemoryInfo *) NULL)
        memory_info=RelinquishVirtualMemory(memory_info);
      (void) CloseBlob(image);
      return(MagickFalse);
    }
  scanline[0]=(JSAMPROW) jpeg_pixels;
  scale=65535/(unsigned short) GetQuantumRange((size_t)
    jpeg_info.data_precision);
  /* Guard against a zero divisor below. */
  if (scale == 0)
    scale=1;
  if (jpeg_info.data_precision <= 8)
    {
      if ((jpeg_info.in_color_space == JCS_RGB) ||
          (jpeg_info.in_color_space == JCS_YCbCr))
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          register const PixelPacket
            *p;

          register ssize_t
            x;

          p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
          if (p == (const PixelPacket *) NULL)
            break;
          q=jpeg_pixels;
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelRed(p));
            *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelGreen(p));
            *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelBlue(p));
            p++;
          }
          (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
          status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
            image->rows);
          if (status == MagickFalse)
            break;
        }
      else
        if (jpeg_info.in_color_space == JCS_GRAYSCALE)
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register const PixelPacket
              *p;

            register ssize_t
              x;

            p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
            if (p == (const PixelPacket *) NULL)
              break;
            q=jpeg_pixels;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              *q++=(JSAMPLE) ScaleQuantumToChar(ClampToQuantum(
                GetPixelLuma(image,p)));
              p++;
            }
            (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
            status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
              image->rows);
            if (status == MagickFalse)
              break;
          }
        else
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register const IndexPacket
              *indexes;

            register const PixelPacket
              *p;

            register ssize_t
              x;

            p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
            if (p == (const PixelPacket *) NULL)
              break;
            q=jpeg_pixels;
            indexes=GetVirtualIndexQueue(image);
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              /*
                Convert DirectClass packets to contiguous CMYK scanlines.
              */
              *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
                GetPixelCyan(p))));
              *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
                GetPixelMagenta(p))));
              *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
                GetPixelYellow(p))));
              *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange-
                GetPixelBlack(indexes+x))));
              p++;
            }
            (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
            status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
              image->rows);
            if (status == MagickFalse)
              break;
          }
    }
  else
    /* 12/16-bit precision paths: samples scaled down by `scale'. */
    if (jpeg_info.in_color_space == JCS_GRAYSCALE)
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register const PixelPacket
          *p;

        register ssize_t
          x;

        p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
        if (p == (const PixelPacket *) NULL)
          break;
        q=jpeg_pixels;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          *q++=(JSAMPLE) (ScaleQuantumToShort(ClampToQuantum(
            GetPixelLuma(image,p)))/scale);
          p++;
        }
        (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
        status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
          image->rows);
        if (status == MagickFalse)
          break;
      }
    else
      if ((jpeg_info.in_color_space == JCS_RGB) ||
          (jpeg_info.in_color_space == JCS_YCbCr))
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          register const PixelPacket
            *p;

          register ssize_t
            x;

          p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
          if (p == (const PixelPacket *) NULL)
            break;
          q=jpeg_pixels;
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelRed(p))/scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelGreen(p))/scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelBlue(p))/scale);
            p++;
          }
          (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
          status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
            image->rows);
          if (status == MagickFalse)
            break;
        }
      else
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          register const IndexPacket
            *indexes;

          register const PixelPacket
            *p;

          register ssize_t
            x;

          p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
          if (p == (const PixelPacket *) NULL)
            break;
          q=jpeg_pixels;
          indexes=GetVirtualIndexQueue(image);
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            /*
              Convert DirectClass packets to contiguous CMYK scanlines.
            */
            *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelRed(p))/
              scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelGreen(p))/
              scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlue(p))/
              scale);
            *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-
              GetPixelIndex(indexes+x))/scale);
            p++;
          }
          (void) jpeg_write_scanlines(&jpeg_info,scanline,1);
          status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
            image->rows);
          if (status == MagickFalse)
            break;
        }
  /* Only finalize the stream if every row was written. */
  if (y == (ssize_t) image->rows)
    jpeg_finish_compress(&jpeg_info);
  /*
    Relinquish resources.
  */
  jpeg_destroy_compress(&jpeg_info);
  memory_info=RelinquishVirtualMemory(memory_info);
  (void) CloseBlob(image);
  return(MagickTrue);
}
#endif
./CrossVul/dataset_final_sorted/CWE-20/c/good_2578_0
crossvul-cpp_data_good_5533_2
/* * Copyright (c) 2009-2010, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/

#include "redis.h"

#ifdef HAVE_BACKTRACE
#include <execinfo.h>
#include <ucontext.h>
#endif /* HAVE_BACKTRACE */

#include <time.h>
#include <signal.h>
#include <sys/wait.h>
#include <errno.h>
#include <assert.h>
#include <ctype.h>
#include <stdarg.h>
#include <arpa/inet.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/uio.h>
#include <limits.h>
#include <float.h>
#include <math.h>
#include <pthread.h>
#include <sys/resource.h>

/* Our shared "common" objects */

struct sharedObjectsStruct shared;

/* Global vars that are actally used as constants. The following double
 * values are used for double on-disk serialization, and are initialized
 * at runtime to avoid strange compiler optimizations. */

double R_Zero, R_PosInf, R_NegInf, R_Nan;

/*================================= Globals ================================= */

/* Global vars */
struct redisServer server; /* server global state */
struct redisCommand *commandTable;
/* Static command table; fields per entry:
 *   name, handler, arity (negative = minimum), flags,
 *   vm_preload_proc, first key arg, last key arg (-1 = last arg), key step.
 */
struct redisCommand readonlyCommandTable[] = {
    {"get",getCommand,2,0,NULL,1,1,1},
    {"set",setCommand,3,REDIS_CMD_DENYOOM,NULL,0,0,0},
    {"setnx",setnxCommand,3,REDIS_CMD_DENYOOM,NULL,0,0,0},
    {"setex",setexCommand,4,REDIS_CMD_DENYOOM,NULL,0,0,0},
    {"append",appendCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"strlen",strlenCommand,2,0,NULL,1,1,1},
    {"del",delCommand,-2,0,NULL,0,0,0},
    {"exists",existsCommand,2,0,NULL,1,1,1},
    {"setbit",setbitCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"getbit",getbitCommand,3,0,NULL,1,1,1},
    {"setrange",setrangeCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"getrange",getrangeCommand,4,0,NULL,1,1,1},
    {"substr",getrangeCommand,4,0,NULL,1,1,1},
    {"incr",incrCommand,2,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"decr",decrCommand,2,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"mget",mgetCommand,-2,0,NULL,1,-1,1},
    {"rpush",rpushCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"lpush",lpushCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"rpushx",rpushxCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"lpushx",lpushxCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"linsert",linsertCommand,5,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"rpop",rpopCommand,2,0,NULL,1,1,1},
    {"lpop",lpopCommand,2,0,NULL,1,1,1},
    {"brpop",brpopCommand,-3,0,NULL,1,1,1},
    {"brpoplpush",brpoplpushCommand,4,REDIS_CMD_DENYOOM,NULL,1,2,1},
    {"blpop",blpopCommand,-3,0,NULL,1,1,1},
    {"llen",llenCommand,2,0,NULL,1,1,1},
    {"lindex",lindexCommand,3,0,NULL,1,1,1},
    {"lset",lsetCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"lrange",lrangeCommand,4,0,NULL,1,1,1},
    {"ltrim",ltrimCommand,4,0,NULL,1,1,1},
    {"lrem",lremCommand,4,0,NULL,1,1,1},
    {"rpoplpush",rpoplpushCommand,3,REDIS_CMD_DENYOOM,NULL,1,2,1},
    {"sadd",saddCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"srem",sremCommand,3,0,NULL,1,1,1},
    {"smove",smoveCommand,4,0,NULL,1,2,1},
    {"sismember",sismemberCommand,3,0,NULL,1,1,1},
    {"scard",scardCommand,2,0,NULL,1,1,1},
    {"spop",spopCommand,2,0,NULL,1,1,1},
    {"srandmember",srandmemberCommand,2,0,NULL,1,1,1},
    {"sinter",sinterCommand,-2,REDIS_CMD_DENYOOM,NULL,1,-1,1},
    {"sinterstore",sinterstoreCommand,-3,REDIS_CMD_DENYOOM,NULL,2,-1,1},
    {"sunion",sunionCommand,-2,REDIS_CMD_DENYOOM,NULL,1,-1,1},
    {"sunionstore",sunionstoreCommand,-3,REDIS_CMD_DENYOOM,NULL,2,-1,1},
    {"sdiff",sdiffCommand,-2,REDIS_CMD_DENYOOM,NULL,1,-1,1},
    {"sdiffstore",sdiffstoreCommand,-3,REDIS_CMD_DENYOOM,NULL,2,-1,1},
    {"smembers",sinterCommand,2,0,NULL,1,1,1},
    {"zadd",zaddCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"zincrby",zincrbyCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"zrem",zremCommand,3,0,NULL,1,1,1},
    {"zremrangebyscore",zremrangebyscoreCommand,4,0,NULL,1,1,1},
    {"zremrangebyrank",zremrangebyrankCommand,4,0,NULL,1,1,1},
    {"zunionstore",zunionstoreCommand,-4,REDIS_CMD_DENYOOM,zunionInterBlockClientOnSwappedKeys,0,0,0},
    {"zinterstore",zinterstoreCommand,-4,REDIS_CMD_DENYOOM,zunionInterBlockClientOnSwappedKeys,0,0,0},
    {"zrange",zrangeCommand,-4,0,NULL,1,1,1},
    {"zrangebyscore",zrangebyscoreCommand,-4,0,NULL,1,1,1},
    {"zrevrangebyscore",zrevrangebyscoreCommand,-4,0,NULL,1,1,1},
    {"zcount",zcountCommand,4,0,NULL,1,1,1},
    {"zrevrange",zrevrangeCommand,-4,0,NULL,1,1,1},
    {"zcard",zcardCommand,2,0,NULL,1,1,1},
    {"zscore",zscoreCommand,3,0,NULL,1,1,1},
    {"zrank",zrankCommand,3,0,NULL,1,1,1},
    {"zrevrank",zrevrankCommand,3,0,NULL,1,1,1},
    {"hset",hsetCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"hsetnx",hsetnxCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"hget",hgetCommand,3,0,NULL,1,1,1},
    {"hmset",hmsetCommand,-4,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"hmget",hmgetCommand,-3,0,NULL,1,1,1},
    {"hincrby",hincrbyCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"hdel",hdelCommand,3,0,NULL,1,1,1},
    {"hlen",hlenCommand,2,0,NULL,1,1,1},
    {"hkeys",hkeysCommand,2,0,NULL,1,1,1},
    {"hvals",hvalsCommand,2,0,NULL,1,1,1},
    {"hgetall",hgetallCommand,2,0,NULL,1,1,1},
    {"hexists",hexistsCommand,3,0,NULL,1,1,1},
    {"incrby",incrbyCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"decrby",decrbyCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"getset",getsetCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"mset",msetCommand,-3,REDIS_CMD_DENYOOM,NULL,1,-1,2},
    {"msetnx",msetnxCommand,-3,REDIS_CMD_DENYOOM,NULL,1,-1,2},
    {"randomkey",randomkeyCommand,1,0,NULL,0,0,0},
    {"select",selectCommand,2,0,NULL,0,0,0},
    {"move",moveCommand,3,0,NULL,1,1,1},
    {"rename",renameCommand,3,0,NULL,1,1,1},
    {"renamenx",renamenxCommand,3,0,NULL,1,1,1},
    {"expire",expireCommand,3,0,NULL,0,0,0},
    {"expireat",expireatCommand,3,0,NULL,0,0,0},
    {"keys",keysCommand,2,0,NULL,0,0,0},
    {"dbsize",dbsizeCommand,1,0,NULL,0,0,0},
    {"auth",authCommand,2,0,NULL,0,0,0},
    {"ping",pingCommand,1,0,NULL,0,0,0},
    {"echo",echoCommand,2,0,NULL,0,0,0},
    {"save",saveCommand,1,0,NULL,0,0,0},
    {"bgsave",bgsaveCommand,1,0,NULL,0,0,0},
    {"bgrewriteaof",bgrewriteaofCommand,1,0,NULL,0,0,0},
    {"shutdown",shutdownCommand,1,0,NULL,0,0,0},
    {"lastsave",lastsaveCommand,1,0,NULL,0,0,0},
    {"type",typeCommand,2,0,NULL,1,1,1},
    {"multi",multiCommand,1,0,NULL,0,0,0},
    {"exec",execCommand,1,REDIS_CMD_DENYOOM,execBlockClientOnSwappedKeys,0,0,0},
    {"discard",discardCommand,1,0,NULL,0,0,0},
    {"sync",syncCommand,1,0,NULL,0,0,0},
    {"flushdb",flushdbCommand,1,0,NULL,0,0,0},
    {"flushall",flushallCommand,1,0,NULL,0,0,0},
    {"sort",sortCommand,-2,REDIS_CMD_DENYOOM,NULL,1,1,1},
    {"info",infoCommand,1,0,NULL,0,0,0},
    {"monitor",monitorCommand,1,0,NULL,0,0,0},
    {"ttl",ttlCommand,2,0,NULL,1,1,1},
    {"persist",persistCommand,2,0,NULL,1,1,1},
    {"slaveof",slaveofCommand,3,0,NULL,0,0,0},
    {"debug",debugCommand,-2,0,NULL,0,0,0},
    {"config",configCommand,-2,0,NULL,0,0,0},
    {"subscribe",subscribeCommand,-2,0,NULL,0,0,0},
    {"unsubscribe",unsubscribeCommand,-1,0,NULL,0,0,0},
    {"psubscribe",psubscribeCommand,-2,0,NULL,0,0,0},
    {"punsubscribe",punsubscribeCommand,-1,0,NULL,0,0,0},
    {"publish",publishCommand,3,REDIS_CMD_FORCE_REPLICATION,NULL,0,0,0},
    {"watch",watchCommand,-2,0,NULL,0,0,0},
    {"unwatch",unwatchCommand,1,0,NULL,0,0,0}
};

/*============================ Utility functions ============================ */

/* Log a message at the given level to stdout or the configured logfile,
 * and optionally to syslog.  Messages below server.verbosity are dropped. */
void redisLog(int level, const char *fmt, ...) {
    const int syslogLevelMap[] = { LOG_DEBUG, LOG_INFO, LOG_NOTICE, LOG_WARNING };
    const char *c = ".-*#";
    time_t now = time(NULL);
    va_list ap;
    FILE *fp;
    char buf[64];
    char msg[REDIS_MAX_LOGMSG_LEN];

    if (level < server.verbosity) return;

    fp = (server.logfile == NULL) ? stdout : fopen(server.logfile,"a");
    if (!fp) return;

    va_start(ap, fmt);
    vsnprintf(msg, sizeof(msg), fmt, ap);
    va_end(ap);

    strftime(buf,sizeof(buf),"%d %b %H:%M:%S",localtime(&now));
    fprintf(fp,"[%d] %s %c %s\n",(int)getpid(),buf,c[level],msg);
    fflush(fp);

    if (server.logfile) fclose(fp);

    if (server.syslog_enabled) syslog(syslogLevelMap[level], "%s", msg);
}

/* Redis generally does not try to recover from out of memory conditions
 * when allocating objects or strings, it is not clear if it will be possible
 * to report this condition to the client since the networking layer itself
 * is based on heap allocation for send buffers, so we simply abort.
 * At least the code will be simpler to read...
*/
void oom(const char *msg) {
    redisLog(REDIS_WARNING, "%s: Out of memory\n",msg);
    sleep(1);
    abort();
}

/*====================== Hash table type implementation ==================== */

/* This is an hash table type that uses the SDS dynamic strings libary as
 * keys and radis objects as values (objects can hold SDS strings,
 * lists, sets). */

void dictVanillaFree(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);
    zfree(val);
}

void dictListDestructor(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);
    listRelease((list*)val);
}

/* Binary-safe comparison of two sds keys: equal only when lengths and
 * bytes match.  Returns non-zero on equality (dict convention). */
int dictSdsKeyCompare(void *privdata, const void *key1,
        const void *key2)
{
    int l1,l2;
    DICT_NOTUSED(privdata);

    l1 = sdslen((sds)key1);
    l2 = sdslen((sds)key2);
    if (l1 != l2) return 0;
    return memcmp(key1, key2, l1) == 0;
}

/* A case insensitive version used for the command lookup table. */
int dictSdsKeyCaseCompare(void *privdata, const void *key1,
        const void *key2)
{
    DICT_NOTUSED(privdata);

    return strcasecmp(key1, key2) == 0;
}

void dictRedisObjectDestructor(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);

    if (val == NULL) return; /* Values of swapped out keys as set to NULL */
    decrRefCount(val);
}

void dictSdsDestructor(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);

    sdsfree(val);
}

int dictObjKeyCompare(void *privdata, const void *key1,
        const void *key2)
{
    const robj *o1 = key1, *o2 = key2;
    return dictSdsKeyCompare(privdata,o1->ptr,o2->ptr);
}

unsigned int dictObjHash(const void *key) {
    const robj *o = key;
    return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
}

unsigned int dictSdsHash(const void *key) {
    return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
}

unsigned int dictSdsCaseHash(const void *key) {
    return dictGenCaseHashFunction((unsigned char*)key, sdslen((char*)key));
}

/* Compare two possibly-encoded robj keys; integer-encoded objects are
 * compared by pointer value, otherwise both are decoded to sds first. */
int dictEncObjKeyCompare(void *privdata, const void *key1,
        const void *key2)
{
    robj *o1 = (robj*) key1, *o2 = (robj*) key2;
    int cmp;

    if (o1->encoding == REDIS_ENCODING_INT &&
        o2->encoding == REDIS_ENCODING_INT)
            return o1->ptr == o2->ptr;

    o1 = getDecodedObject(o1);
    o2 = getDecodedObject(o2);
    cmp = dictSdsKeyCompare(privdata,o1->ptr,o2->ptr);
    decrRefCount(o1);
    decrRefCount(o2);
    return cmp;
}

/* Hash a possibly-encoded robj so that equal values hash the same
 * regardless of their encoding. */
unsigned int dictEncObjHash(const void *key) {
    robj *o = (robj*) key;

    if (o->encoding == REDIS_ENCODING_RAW) {
        return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
    } else {
        if (o->encoding == REDIS_ENCODING_INT) {
            char buf[32];
            int len;

            len = ll2string(buf,32,(long)o->ptr);
            return dictGenHashFunction((unsigned char*)buf, len);
        } else {
            unsigned int hash;

            o = getDecodedObject(o);
            hash = dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
            decrRefCount(o);
            return hash;
        }
    }
}

/* Sets type */
dictType setDictType = {
    dictEncObjHash,            /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictEncObjKeyCompare,      /* key compare */
    dictRedisObjectDestructor, /* key destructor */
    NULL                       /* val destructor */
};

/* Sorted sets hash (note: a skiplist is used in addition to the hash table) */
dictType zsetDictType = {
    dictEncObjHash,            /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictEncObjKeyCompare,      /* key compare */
    dictRedisObjectDestructor, /* key destructor */
    NULL                       /* val destructor */
};

/* Db->dict, keys are sds strings, vals are Redis objects. */
dictType dbDictType = {
    dictSdsHash,               /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictSdsKeyCompare,         /* key compare */
    dictSdsDestructor,         /* key destructor */
    dictRedisObjectDestructor  /* val destructor */
};

/* Db->expires */
dictType keyptrDictType = {
    dictSdsHash,               /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictSdsKeyCompare,         /* key compare */
    NULL,                      /* key destructor */
    NULL                       /* val destructor */
};

/* Command table. sds string -> command struct pointer. */
dictType commandTableDictType = {
    dictSdsCaseHash,           /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictSdsKeyCaseCompare,     /* key compare */
    dictSdsDestructor,         /* key destructor */
    NULL                       /* val destructor */
};

/* Hash type hash table (note that small hashes are represented with zimpaps) */
dictType hashDictType = {
    dictEncObjHash,            /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictEncObjKeyCompare,      /* key compare */
    dictRedisObjectDestructor, /* key destructor */
    dictRedisObjectDestructor  /* val destructor */
};

/* Keylist hash table type has unencoded redis objects as keys and
 * lists as values. It's used for blocking operations (BLPOP) and to
 * map swapped keys to a list of clients waiting for this keys to be loaded. */
dictType keylistDictType = {
    dictObjHash,               /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictObjKeyCompare,         /* key compare */
    dictRedisObjectDestructor, /* key destructor */
    dictListDestructor         /* val destructor */
};

/* Return non-zero when a dict is sparse enough (usage below
 * REDIS_HT_MINFILL percent) that shrinking it would save memory. */
int htNeedsResize(dict *dict) {
    long long size, used;

    size = dictSlots(dict);
    used = dictSize(dict);
    return (size && used && size > DICT_HT_INITIAL_SIZE &&
            (used*100/size < REDIS_HT_MINFILL));
}

/* If the percentage of used slots in the HT reaches REDIS_HT_MINFILL
 * we resize the hash table to save memory */
void tryResizeHashTables(void) {
    int j;

    for (j = 0; j < server.dbnum; j++) {
        if (htNeedsResize(server.db[j].dict))
            dictResize(server.db[j].dict);
        if (htNeedsResize(server.db[j].expires))
            dictResize(server.db[j].expires);
    }
}

/* Our hash table implementation performs rehashing incrementally while
 * we write/read from the hash table. Still if the server is idle, the hash
 * table will use two tables for a long time. So we try to use 1 millisecond
 * of CPU time at every serverCron() loop in order to rehash some key.
*/ void incrementallyRehash(void) { int j; for (j = 0; j < server.dbnum; j++) { if (dictIsRehashing(server.db[j].dict)) { dictRehashMilliseconds(server.db[j].dict,1); break; /* already used our millisecond for this loop... */ } } } /* This function is called once a background process of some kind terminates, * as we want to avoid resizing the hash tables when there is a child in order * to play well with copy-on-write (otherwise when a resize happens lots of * memory pages are copied). The goal of this function is to update the ability * for dict.c to resize the hash tables accordingly to the fact we have o not * running childs. */ void updateDictResizePolicy(void) { if (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1) dictEnableResize(); else dictDisableResize(); } /* ======================= Cron: called every 100 ms ======================== */ /* Try to expire a few timed out keys. The algorithm used is adaptive and * will use few CPU cycles if there are few expiring keys, otherwise * it will get more aggressive to avoid that too much memory is used by * keys that can be removed from the keyspace. */ void activeExpireCycle(void) { int j; for (j = 0; j < server.dbnum; j++) { int expired; redisDb *db = server.db+j; /* Continue to expire if at the end of the cycle more than 25% * of the keys were expired. 
*/ do { long num = dictSize(db->expires); time_t now = time(NULL); expired = 0; if (num > REDIS_EXPIRELOOKUPS_PER_CRON) num = REDIS_EXPIRELOOKUPS_PER_CRON; while (num--) { dictEntry *de; time_t t; if ((de = dictGetRandomKey(db->expires)) == NULL) break; t = (time_t) dictGetEntryVal(de); if (now > t) { sds key = dictGetEntryKey(de); robj *keyobj = createStringObject(key,sdslen(key)); propagateExpire(db,keyobj); dbDelete(db,keyobj); decrRefCount(keyobj); expired++; server.stat_expiredkeys++; } } } while (expired > REDIS_EXPIRELOOKUPS_PER_CRON/4); } } void updateLRUClock(void) { server.lruclock = (time(NULL)/REDIS_LRU_CLOCK_RESOLUTION) & REDIS_LRU_CLOCK_MAX; } int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { int j, loops = server.cronloops++; REDIS_NOTUSED(eventLoop); REDIS_NOTUSED(id); REDIS_NOTUSED(clientData); /* We take a cached value of the unix time in the global state because * with virtual memory and aging there is to store the current time * in objects at every object access, and accuracy is not needed. * To access a global var is faster than calling time(NULL) */ server.unixtime = time(NULL); /* We have just 22 bits per object for LRU information. * So we use an (eventually wrapping) LRU clock with 10 seconds resolution. * 2^22 bits with 10 seconds resoluton is more or less 1.5 years. * * Note that even if this will wrap after 1.5 years it's not a problem, * everything will still work but just some object will appear younger * to Redis. But for this to happen a given object should never be touched * for 1.5 years. * * Note that you can change the resolution altering the * REDIS_LRU_CLOCK_RESOLUTION define. */ updateLRUClock(); /* We received a SIGTERM, shutting down here in a safe way, as it is * not ok doing so inside the signal handler. 
*/ if (server.shutdown_asap) { if (prepareForShutdown() == REDIS_OK) exit(0); redisLog(REDIS_WARNING,"SIGTERM received but errors trying to shut down the server, check the logs for more information"); } /* Show some info about non-empty databases */ for (j = 0; j < server.dbnum; j++) { long long size, used, vkeys; size = dictSlots(server.db[j].dict); used = dictSize(server.db[j].dict); vkeys = dictSize(server.db[j].expires); if (!(loops % 50) && (used || vkeys)) { redisLog(REDIS_VERBOSE,"DB %d: %lld keys (%lld volatile) in %lld slots HT.",j,used,vkeys,size); /* dictPrintStats(server.dict); */ } } /* We don't want to resize the hash tables while a bacground saving * is in progress: the saving child is created using fork() that is * implemented with a copy-on-write semantic in most modern systems, so * if we resize the HT while there is the saving child at work actually * a lot of memory movements in the parent will cause a lot of pages * copied. */ if (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1) { if (!(loops % 10)) tryResizeHashTables(); if (server.activerehashing) incrementallyRehash(); } /* Show information about connected clients */ if (!(loops % 50)) { redisLog(REDIS_VERBOSE,"%d clients connected (%d slaves), %zu bytes in use", listLength(server.clients)-listLength(server.slaves), listLength(server.slaves), zmalloc_used_memory()); } /* Close connections of timedout clients */ if ((server.maxidletime && !(loops % 100)) || server.bpop_blocked_clients) closeTimedoutClients(); /* Check if a background saving or AOF rewrite in progress terminated */ if (server.bgsavechildpid != -1 || server.bgrewritechildpid != -1) { int statloc; pid_t pid; if ((pid = wait3(&statloc,WNOHANG,NULL)) != 0) { if (pid == server.bgsavechildpid) { backgroundSaveDoneHandler(statloc); } else { backgroundRewriteDoneHandler(statloc); } updateDictResizePolicy(); } } else { /* If there is not a background saving in progress check if * we have to save now */ time_t now = 
time(NULL); for (j = 0; j < server.saveparamslen; j++) { struct saveparam *sp = server.saveparams+j; if (server.dirty >= sp->changes && now-server.lastsave > sp->seconds) { redisLog(REDIS_NOTICE,"%d changes in %d seconds. Saving...", sp->changes, sp->seconds); rdbSaveBackground(server.dbfilename); break; } } } /* Expire a few keys per cycle, only if this is a master. * On slaves we wait for DEL operations synthesized by the master * in order to guarantee a strict consistency. */ if (server.masterhost == NULL) activeExpireCycle(); /* Remove a few cached objects from memory if we are over the * configured memory limit */ while (server.ds_enabled && zmalloc_used_memory() > server.cache_max_memory) { cacheFreeOneEntry(); } /* Replication cron function -- used to reconnect to master and * to detect transfer failures. */ if (!(loops % 10)) replicationCron(); return 100; } /* This function gets called every time Redis is entering the * main loop of the event driven library, that is, before to sleep * for ready file descriptors. */ void beforeSleep(struct aeEventLoop *eventLoop) { REDIS_NOTUSED(eventLoop); listNode *ln; redisClient *c; /* Awake clients that got all the on disk keys they requested */ if (server.ds_enabled && listLength(server.io_ready_clients)) { listIter li; listRewind(server.io_ready_clients,&li); while((ln = listNext(&li))) { c = ln->value; struct redisCommand *cmd; /* Resume the client. */ listDelNode(server.io_ready_clients,ln); c->flags &= (~REDIS_IO_WAIT); server.cache_blocked_clients--; aeCreateFileEvent(server.el, c->fd, AE_READABLE, readQueryFromClient, c); cmd = lookupCommand(c->argv[0]->ptr); redisAssert(cmd != NULL); call(c,cmd); resetClient(c); /* There may be more data to process in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } } /* Try to process pending commands for clients that were just unblocked. 
*/ while (listLength(server.unblocked_clients)) { ln = listFirst(server.unblocked_clients); redisAssert(ln != NULL); c = ln->value; listDelNode(server.unblocked_clients,ln); /* Process remaining data in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } /* Write the AOF buffer on disk */ flushAppendOnlyFile(); } /* =========================== Server initialization ======================== */ void createSharedObjects(void) { int j; shared.crlf = createObject(REDIS_STRING,sdsnew("\r\n")); shared.ok = createObject(REDIS_STRING,sdsnew("+OK\r\n")); shared.err = createObject(REDIS_STRING,sdsnew("-ERR\r\n")); shared.emptybulk = createObject(REDIS_STRING,sdsnew("$0\r\n\r\n")); shared.czero = createObject(REDIS_STRING,sdsnew(":0\r\n")); shared.cone = createObject(REDIS_STRING,sdsnew(":1\r\n")); shared.cnegone = createObject(REDIS_STRING,sdsnew(":-1\r\n")); shared.nullbulk = createObject(REDIS_STRING,sdsnew("$-1\r\n")); shared.nullmultibulk = createObject(REDIS_STRING,sdsnew("*-1\r\n")); shared.emptymultibulk = createObject(REDIS_STRING,sdsnew("*0\r\n")); shared.pong = createObject(REDIS_STRING,sdsnew("+PONG\r\n")); shared.queued = createObject(REDIS_STRING,sdsnew("+QUEUED\r\n")); shared.wrongtypeerr = createObject(REDIS_STRING,sdsnew( "-ERR Operation against a key holding the wrong kind of value\r\n")); shared.nokeyerr = createObject(REDIS_STRING,sdsnew( "-ERR no such key\r\n")); shared.syntaxerr = createObject(REDIS_STRING,sdsnew( "-ERR syntax error\r\n")); shared.sameobjecterr = createObject(REDIS_STRING,sdsnew( "-ERR source and destination objects are the same\r\n")); shared.outofrangeerr = createObject(REDIS_STRING,sdsnew( "-ERR index out of range\r\n")); shared.loadingerr = createObject(REDIS_STRING,sdsnew( "-LOADING Redis is loading the dataset in memory\r\n")); shared.space = createObject(REDIS_STRING,sdsnew(" ")); shared.colon = createObject(REDIS_STRING,sdsnew(":")); shared.plus = createObject(REDIS_STRING,sdsnew("+")); 
shared.select0 = createStringObject("select 0\r\n",10); shared.select1 = createStringObject("select 1\r\n",10); shared.select2 = createStringObject("select 2\r\n",10); shared.select3 = createStringObject("select 3\r\n",10); shared.select4 = createStringObject("select 4\r\n",10); shared.select5 = createStringObject("select 5\r\n",10); shared.select6 = createStringObject("select 6\r\n",10); shared.select7 = createStringObject("select 7\r\n",10); shared.select8 = createStringObject("select 8\r\n",10); shared.select9 = createStringObject("select 9\r\n",10); shared.messagebulk = createStringObject("$7\r\nmessage\r\n",13); shared.pmessagebulk = createStringObject("$8\r\npmessage\r\n",14); shared.subscribebulk = createStringObject("$9\r\nsubscribe\r\n",15); shared.unsubscribebulk = createStringObject("$11\r\nunsubscribe\r\n",18); shared.psubscribebulk = createStringObject("$10\r\npsubscribe\r\n",17); shared.punsubscribebulk = createStringObject("$12\r\npunsubscribe\r\n",19); shared.mbulk3 = createStringObject("*3\r\n",4); shared.mbulk4 = createStringObject("*4\r\n",4); for (j = 0; j < REDIS_SHARED_INTEGERS; j++) { shared.integers[j] = createObject(REDIS_STRING,(void*)(long)j); shared.integers[j]->encoding = REDIS_ENCODING_INT; } } void initServerConfig() { server.port = REDIS_SERVERPORT; server.bindaddr = NULL; server.unixsocket = NULL; server.ipfd = -1; server.sofd = -1; server.dbnum = REDIS_DEFAULT_DBNUM; server.verbosity = REDIS_VERBOSE; server.maxidletime = REDIS_MAXIDLETIME; server.saveparams = NULL; server.loading = 0; server.logfile = NULL; /* NULL = log on standard output */ server.syslog_enabled = 0; server.syslog_ident = zstrdup("redis"); server.syslog_facility = LOG_LOCAL0; server.glueoutputbuf = 1; server.daemonize = 0; server.appendonly = 0; server.appendfsync = APPENDFSYNC_EVERYSEC; server.no_appendfsync_on_rewrite = 0; server.lastfsync = time(NULL); server.appendfd = -1; server.appendseldb = -1; /* Make sure the first time will not match */ server.pidfile = 
zstrdup("/var/run/redis.pid"); server.dbfilename = zstrdup("dump.rdb"); server.appendfilename = zstrdup("appendonly.aof"); server.requirepass = NULL; server.rdbcompression = 1; server.activerehashing = 1; server.maxclients = 0; server.bpop_blocked_clients = 0; server.maxmemory = 0; server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_LRU; server.maxmemory_samples = 3; server.ds_enabled = 0; server.ds_path = zstrdup("/tmp/redis.ds"); server.cache_max_memory = 64LL*1024*1024; /* 64 MB of RAM */ server.cache_blocked_clients = 0; server.hash_max_zipmap_entries = REDIS_HASH_MAX_ZIPMAP_ENTRIES; server.hash_max_zipmap_value = REDIS_HASH_MAX_ZIPMAP_VALUE; server.list_max_ziplist_entries = REDIS_LIST_MAX_ZIPLIST_ENTRIES; server.list_max_ziplist_value = REDIS_LIST_MAX_ZIPLIST_VALUE; server.set_max_intset_entries = REDIS_SET_MAX_INTSET_ENTRIES; server.shutdown_asap = 0; updateLRUClock(); resetServerSaveParams(); appendServerSaveParams(60*60,1); /* save after 1 hour and 1 change */ appendServerSaveParams(300,100); /* save after 5 minutes and 100 changes */ appendServerSaveParams(60,10000); /* save after 1 minute and 10000 changes */ /* Replication related */ server.isslave = 0; server.masterauth = NULL; server.masterhost = NULL; server.masterport = 6379; server.master = NULL; server.replstate = REDIS_REPL_NONE; server.repl_serve_stale_data = 1; /* Double constants initialization */ R_Zero = 0.0; R_PosInf = 1.0/R_Zero; R_NegInf = -1.0/R_Zero; R_Nan = R_Zero/R_Zero; /* Command table -- we intiialize it here as it is part of the * initial configuration, since command names may be changed via * redis.conf using the rename-command directive. 
*/ server.commands = dictCreate(&commandTableDictType,NULL); populateCommandTable(); server.delCommand = lookupCommandByCString("del"); server.multiCommand = lookupCommandByCString("multi"); } void initServer() { int j; signal(SIGHUP, SIG_IGN); signal(SIGPIPE, SIG_IGN); setupSigSegvAction(); if (server.syslog_enabled) { openlog(server.syslog_ident, LOG_PID | LOG_NDELAY | LOG_NOWAIT, server.syslog_facility); } server.mainthread = pthread_self(); server.clients = listCreate(); server.slaves = listCreate(); server.monitors = listCreate(); server.unblocked_clients = listCreate(); createSharedObjects(); server.el = aeCreateEventLoop(); server.db = zmalloc(sizeof(redisDb)*server.dbnum); server.ipfd = anetTcpServer(server.neterr,server.port,server.bindaddr); if (server.ipfd == ANET_ERR) { redisLog(REDIS_WARNING, "Opening port: %s", server.neterr); exit(1); } if (server.unixsocket != NULL) { unlink(server.unixsocket); /* don't care if this fails */ server.sofd = anetUnixServer(server.neterr,server.unixsocket); if (server.sofd == ANET_ERR) { redisLog(REDIS_WARNING, "Opening socket: %s", server.neterr); exit(1); } } if (server.ipfd < 0 && server.sofd < 0) { redisLog(REDIS_WARNING, "Configured to not listen anywhere, exiting."); exit(1); } for (j = 0; j < server.dbnum; j++) { server.db[j].dict = dictCreate(&dbDictType,NULL); server.db[j].expires = dictCreate(&keyptrDictType,NULL); server.db[j].blocking_keys = dictCreate(&keylistDictType,NULL); server.db[j].watched_keys = dictCreate(&keylistDictType,NULL); if (server.ds_enabled) server.db[j].io_keys = dictCreate(&keylistDictType,NULL); server.db[j].id = j; } server.pubsub_channels = dictCreate(&keylistDictType,NULL); server.pubsub_patterns = listCreate(); listSetFreeMethod(server.pubsub_patterns,freePubsubPattern); listSetMatchMethod(server.pubsub_patterns,listMatchPubsubPattern); server.cronloops = 0; server.bgsavechildpid = -1; server.bgrewritechildpid = -1; server.bgrewritebuf = sdsempty(); server.aofbuf = sdsempty(); 
server.lastsave = time(NULL); server.dirty = 0; server.stat_numcommands = 0; server.stat_numconnections = 0; server.stat_expiredkeys = 0; server.stat_evictedkeys = 0; server.stat_starttime = time(NULL); server.stat_keyspace_misses = 0; server.stat_keyspace_hits = 0; server.unixtime = time(NULL); aeCreateTimeEvent(server.el, 1, serverCron, NULL, NULL); if (server.ipfd > 0 && aeCreateFileEvent(server.el,server.ipfd,AE_READABLE, acceptTcpHandler,NULL) == AE_ERR) oom("creating file event"); if (server.sofd > 0 && aeCreateFileEvent(server.el,server.sofd,AE_READABLE, acceptUnixHandler,NULL) == AE_ERR) oom("creating file event"); if (server.appendonly) { server.appendfd = open(server.appendfilename,O_WRONLY|O_APPEND|O_CREAT,0644); if (server.appendfd == -1) { redisLog(REDIS_WARNING, "Can't open the append-only file: %s", strerror(errno)); exit(1); } } if (server.ds_enabled) dsInit(); } /* Populates the Redis Command Table starting from the hard coded list * we have on top of redis.c file. */ void populateCommandTable(void) { int j; int numcommands = sizeof(readonlyCommandTable)/sizeof(struct redisCommand); for (j = 0; j < numcommands; j++) { struct redisCommand *c = readonlyCommandTable+j; int retval; retval = dictAdd(server.commands, sdsnew(c->name), c); assert(retval == DICT_OK); } } /* ====================== Commands lookup and execution ===================== */ struct redisCommand *lookupCommand(sds name) { return dictFetchValue(server.commands, name); } struct redisCommand *lookupCommandByCString(char *s) { struct redisCommand *cmd; sds name = sdsnew(s); cmd = dictFetchValue(server.commands, name); sdsfree(name); return cmd; } /* Call() is the core of Redis execution of a command */ void call(redisClient *c, struct redisCommand *cmd) { long long dirty; dirty = server.dirty; cmd->proc(c); dirty = server.dirty-dirty; if (server.appendonly && dirty) feedAppendOnlyFile(cmd,c->db->id,c->argv,c->argc); if ((dirty || cmd->flags & REDIS_CMD_FORCE_REPLICATION) && 
listLength(server.slaves)) replicationFeedSlaves(server.slaves,c->db->id,c->argv,c->argc); if (listLength(server.monitors)) replicationFeedMonitors(server.monitors,c->db->id,c->argv,c->argc); server.stat_numcommands++; } /* If this function gets called we already read a whole * command, argments are in the client argv/argc fields. * processCommand() execute the command or prepare the * server for a bulk read from the client. * * If 1 is returned the client is still alive and valid and * and other operations can be performed by the caller. Otherwise * if 0 is returned the client was destroied (i.e. after QUIT). */ int processCommand(redisClient *c) { struct redisCommand *cmd; /* The QUIT command is handled separately. Normal command procs will * go through checking for replication and QUIT will cause trouble * when FORCE_REPLICATION is enabled and would be implemented in * a regular command proc. */ if (!strcasecmp(c->argv[0]->ptr,"quit")) { addReply(c,shared.ok); c->flags |= REDIS_CLOSE_AFTER_REPLY; return REDIS_ERR; } /* Now lookup the command and check ASAP about trivial error conditions * such wrong arity, bad command name and so forth. */ cmd = lookupCommand(c->argv[0]->ptr); if (!cmd) { addReplyErrorFormat(c,"unknown command '%s'", (char*)c->argv[0]->ptr); return REDIS_OK; } else if ((cmd->arity > 0 && cmd->arity != c->argc) || (c->argc < -cmd->arity)) { addReplyErrorFormat(c,"wrong number of arguments for '%s' command", cmd->name); return REDIS_OK; } /* Check if the user is authenticated */ if (server.requirepass && !c->authenticated && cmd->proc != authCommand) { addReplyError(c,"operation not permitted"); return REDIS_OK; } /* Handle the maxmemory directive. * * First we try to free some memory if possible (if there are volatile * keys in the dataset). If there are not the only thing we can do * is returning an error. 
*/ if (server.maxmemory) freeMemoryIfNeeded(); if (server.maxmemory && (cmd->flags & REDIS_CMD_DENYOOM) && zmalloc_used_memory() > server.maxmemory) { addReplyError(c,"command not allowed when used memory > 'maxmemory'"); return REDIS_OK; } /* Only allow SUBSCRIBE and UNSUBSCRIBE in the context of Pub/Sub */ if ((dictSize(c->pubsub_channels) > 0 || listLength(c->pubsub_patterns) > 0) && cmd->proc != subscribeCommand && cmd->proc != unsubscribeCommand && cmd->proc != psubscribeCommand && cmd->proc != punsubscribeCommand) { addReplyError(c,"only (P)SUBSCRIBE / (P)UNSUBSCRIBE / QUIT allowed in this context"); return REDIS_OK; } /* Only allow INFO and SLAVEOF when slave-serve-stale-data is no and * we are a slave with a broken link with master. */ if (server.masterhost && server.replstate != REDIS_REPL_CONNECTED && server.repl_serve_stale_data == 0 && cmd->proc != infoCommand && cmd->proc != slaveofCommand) { addReplyError(c, "link with MASTER is down and slave-serve-stale-data is set to no"); return REDIS_OK; } /* Loading DB? Return an error if the command is not INFO */ if (server.loading && cmd->proc != infoCommand) { addReply(c, shared.loadingerr); return REDIS_OK; } /* Exec the command */ if (c->flags & REDIS_MULTI && cmd->proc != execCommand && cmd->proc != discardCommand && cmd->proc != multiCommand && cmd->proc != watchCommand) { queueMultiCommand(c,cmd); addReply(c,shared.queued); } else { if (server.ds_enabled && blockClientOnSwappedKeys(c,cmd)) return REDIS_ERR; call(c,cmd); } return REDIS_OK; } /*================================== Shutdown =============================== */ int prepareForShutdown() { redisLog(REDIS_WARNING,"User requested shutdown, saving DB..."); /* Kill the saving child if there is a background saving in progress. We want to avoid race conditions, for instance our saving child may overwrite the synchronous saving did by SHUTDOWN. */ if (server.bgsavechildpid != -1) { redisLog(REDIS_WARNING,"There is a live saving child. 
Killing it!"); kill(server.bgsavechildpid,SIGKILL); rdbRemoveTempFile(server.bgsavechildpid); } if (server.appendonly) { /* Append only file: fsync() the AOF and exit */ aof_fsync(server.appendfd); } else if (server.saveparamslen > 0) { /* Snapshotting. Perform a SYNC SAVE and exit */ if (rdbSave(server.dbfilename) != REDIS_OK) { /* Ooops.. error saving! The best we can do is to continue * operating. Note that if there was a background saving process, * in the next cron() Redis will be notified that the background * saving aborted, handling special stuff like slaves pending for * synchronization... */ redisLog(REDIS_WARNING,"Error trying to save the DB, can't exit"); return REDIS_ERR; } } else { redisLog(REDIS_WARNING,"Not saving DB."); } if (server.daemonize) unlink(server.pidfile); redisLog(REDIS_WARNING,"Server exit now, bye bye..."); return REDIS_OK; } /*================================== Commands =============================== */ void authCommand(redisClient *c) { if (!server.requirepass || !strcmp(c->argv[1]->ptr, server.requirepass)) { c->authenticated = 1; addReply(c,shared.ok); } else { c->authenticated = 0; addReplyError(c,"invalid password"); } } void pingCommand(redisClient *c) { addReply(c,shared.pong); } void echoCommand(redisClient *c) { addReplyBulk(c,c->argv[1]); } /* Convert an amount of bytes into a human readable string in the form * of 100B, 2G, 100M, 4K, and so forth. */ void bytesToHuman(char *s, unsigned long long n) { double d; if (n < 1024) { /* Bytes */ sprintf(s,"%lluB",n); return; } else if (n < (1024*1024)) { d = (double)n/(1024); sprintf(s,"%.2fK",d); } else if (n < (1024LL*1024*1024)) { d = (double)n/(1024*1024); sprintf(s,"%.2fM",d); } else if (n < (1024LL*1024*1024*1024)) { d = (double)n/(1024LL*1024*1024); sprintf(s,"%.2fG",d); } } /* Create the string returned by the INFO command. This is decoupled * by the INFO command itself as we need to report the same information * on memory corruption problems. 
*/ sds genRedisInfoString(void) { sds info; time_t uptime = time(NULL)-server.stat_starttime; int j; char hmem[64]; struct rusage self_ru, c_ru; getrusage(RUSAGE_SELF, &self_ru); getrusage(RUSAGE_CHILDREN, &c_ru); bytesToHuman(hmem,zmalloc_used_memory()); info = sdscatprintf(sdsempty(), "redis_version:%s\r\n" "redis_git_sha1:%s\r\n" "redis_git_dirty:%d\r\n" "arch_bits:%s\r\n" "multiplexing_api:%s\r\n" "process_id:%ld\r\n" "uptime_in_seconds:%ld\r\n" "uptime_in_days:%ld\r\n" "lru_clock:%ld\r\n" "used_cpu_sys:%.2f\r\n" "used_cpu_user:%.2f\r\n" "used_cpu_sys_childrens:%.2f\r\n" "used_cpu_user_childrens:%.2f\r\n" "connected_clients:%d\r\n" "connected_slaves:%d\r\n" "blocked_clients:%d\r\n" "used_memory:%zu\r\n" "used_memory_human:%s\r\n" "used_memory_rss:%zu\r\n" "mem_fragmentation_ratio:%.2f\r\n" "use_tcmalloc:%d\r\n" "loading:%d\r\n" "aof_enabled:%d\r\n" "changes_since_last_save:%lld\r\n" "bgsave_in_progress:%d\r\n" "last_save_time:%ld\r\n" "bgrewriteaof_in_progress:%d\r\n" "total_connections_received:%lld\r\n" "total_commands_processed:%lld\r\n" "expired_keys:%lld\r\n" "evicted_keys:%lld\r\n" "keyspace_hits:%lld\r\n" "keyspace_misses:%lld\r\n" "hash_max_zipmap_entries:%zu\r\n" "hash_max_zipmap_value:%zu\r\n" "pubsub_channels:%ld\r\n" "pubsub_patterns:%u\r\n" "ds_enabled:%d\r\n" "role:%s\r\n" ,REDIS_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, (sizeof(long) == 8) ? 
"64" : "32", aeGetApiName(), (long) getpid(), uptime, uptime/(3600*24), (unsigned long) server.lruclock, (float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000, (float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000, (float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000, (float)c_ru.ru_stime.tv_sec+(float)c_ru.ru_stime.tv_usec/1000000, listLength(server.clients)-listLength(server.slaves), listLength(server.slaves), server.bpop_blocked_clients, zmalloc_used_memory(), hmem, zmalloc_get_rss(), zmalloc_get_fragmentation_ratio(), #ifdef USE_TCMALLOC 1, #else 0, #endif server.loading, server.appendonly, server.dirty, server.bgsavechildpid != -1, server.lastsave, server.bgrewritechildpid != -1, server.stat_numconnections, server.stat_numcommands, server.stat_expiredkeys, server.stat_evictedkeys, server.stat_keyspace_hits, server.stat_keyspace_misses, server.hash_max_zipmap_entries, server.hash_max_zipmap_value, dictSize(server.pubsub_channels), listLength(server.pubsub_patterns), server.ds_enabled != 0, server.masterhost == NULL ? "master" : "slave" ); if (server.masterhost) { info = sdscatprintf(info, "master_host:%s\r\n" "master_port:%d\r\n" "master_link_status:%s\r\n" "master_last_io_seconds_ago:%d\r\n" "master_sync_in_progress:%d\r\n" ,server.masterhost, server.masterport, (server.replstate == REDIS_REPL_CONNECTED) ? "up" : "down", server.master ? 
((int)(time(NULL)-server.master->lastinteraction)) : -1, server.replstate == REDIS_REPL_TRANSFER ); if (server.replstate == REDIS_REPL_TRANSFER) { info = sdscatprintf(info, "master_sync_left_bytes:%ld\r\n" "master_sync_last_io_seconds_ago:%d\r\n" ,(long)server.repl_transfer_left, (int)(time(NULL)-server.repl_transfer_lastio) ); } } if (server.ds_enabled) { lockThreadedIO(); info = sdscatprintf(info, "cache_max_memory:%llu\r\n" "cache_blocked_clients:%lu\r\n" ,(unsigned long long) server.cache_max_memory, (unsigned long) server.cache_blocked_clients ); unlockThreadedIO(); } if (server.loading) { double perc; time_t eta, elapsed; off_t remaining_bytes = server.loading_total_bytes- server.loading_loaded_bytes; perc = ((double)server.loading_loaded_bytes / server.loading_total_bytes) * 100; elapsed = time(NULL)-server.loading_start_time; if (elapsed == 0) { eta = 1; /* A fake 1 second figure if we don't have enough info */ } else { eta = (elapsed*remaining_bytes)/server.loading_loaded_bytes; } info = sdscatprintf(info, "loading_start_time:%ld\r\n" "loading_total_bytes:%llu\r\n" "loading_loaded_bytes:%llu\r\n" "loading_loaded_perc:%.2f\r\n" "loading_eta_seconds:%ld\r\n" ,(unsigned long) server.loading_start_time, (unsigned long long) server.loading_total_bytes, (unsigned long long) server.loading_loaded_bytes, perc, eta ); } for (j = 0; j < server.dbnum; j++) { long long keys, vkeys; keys = dictSize(server.db[j].dict); vkeys = dictSize(server.db[j].expires); if (keys || vkeys) { info = sdscatprintf(info, "db%d:keys=%lld,expires=%lld\r\n", j, keys, vkeys); } } return info; } void infoCommand(redisClient *c) { sds info = genRedisInfoString(); addReplySds(c,sdscatprintf(sdsempty(),"$%lu\r\n", (unsigned long)sdslen(info))); addReplySds(c,info); addReply(c,shared.crlf); } void monitorCommand(redisClient *c) { /* ignore MONITOR if aleady slave or in monitor mode */ if (c->flags & REDIS_SLAVE) return; c->flags |= (REDIS_SLAVE|REDIS_MONITOR); c->slaveseldb = 0; 
listAddNodeTail(server.monitors,c);
    addReply(c,shared.ok);
}

/* ============================ Maxmemory directive ======================== */

/* This function gets called when 'maxmemory' is set on the config file to limit
 * the max memory used by the server, and we are out of memory.
 * This function will try to, in order:
 *
 * - Free objects from the free list
 * - Try to remove keys with an EXPIRE set
 *
 * It is not possible to free enough memory to reach used-memory < maxmemory
 * the server will start refusing commands that will enlarge even more the
 * memory usage.
 */
void freeMemoryIfNeeded(void) {
    /* Remove keys accordingly to the active policy as long as we are
     * over the memory limit. */
    if (server.maxmemory_policy == REDIS_MAXMEMORY_NO_EVICTION) return;

    /* Loop until we are back under the limit or nothing more can be freed. */
    while (server.maxmemory && zmalloc_used_memory() > server.maxmemory) {
        int j, k, freed = 0;

        /* One eviction attempt per database per outer iteration. */
        for (j = 0; j < server.dbnum; j++) {
            long bestval = 0; /* just to prevent warning */
            sds bestkey = NULL;
            struct dictEntry *de;
            redisDb *db = server.db+j;
            dict *dict;

            /* allkeys-* policies sample the whole keyspace; volatile-*
             * policies only sample keys that carry an expire. */
            if (server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_LRU ||
                server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_RANDOM)
            {
                dict = server.db[j].dict;
            } else {
                dict = server.db[j].expires;
            }
            if (dictSize(dict) == 0) continue;

            /* volatile-random and allkeys-random policy */
            if (server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_RANDOM ||
                server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_RANDOM)
            {
                /* NOTE(review): dictGetRandomKey() is assumed non-NULL here
                 * because dictSize(dict) != 0 was checked above — confirm. */
                de = dictGetRandomKey(dict);
                bestkey = dictGetEntryKey(de);
            }
            /* volatile-lru and allkeys-lru policy: approximate LRU by
             * sampling maxmemory_samples random keys and evicting the
             * one with the highest estimated idle time. */
            else if (server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_LRU ||
                server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_LRU)
            {
                for (k = 0; k < server.maxmemory_samples; k++) {
                    sds thiskey;
                    long thisval;
                    robj *o;

                    de = dictGetRandomKey(dict);
                    thiskey = dictGetEntryKey(de);
                    /* When policy is volatile-lru we need an additional lookup
                     * to locate the real key, as dict is set to db->expires. */
                    if (server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_LRU)
                        de = dictFind(db->dict, thiskey);
                    o = dictGetEntryVal(de);
                    thisval = estimateObjectIdleTime(o);

                    /* Higher idle time is better candidate for deletion */
                    if (bestkey == NULL || thisval > bestval) {
                        bestkey = thiskey;
                        bestval = thisval;
                    }
                }
            }
            /* volatile-ttl: sample and evict the key closest to expiring. */
            else if (server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_TTL) {
                for (k = 0; k < server.maxmemory_samples; k++) {
                    sds thiskey;
                    long thisval;

                    de = dictGetRandomKey(dict);
                    thiskey = dictGetEntryKey(de);
                    /* In db->expires the dict value is the expire unix time. */
                    thisval = (long) dictGetEntryVal(de);

                    /* Expire sooner (minor expire unix timestamp) is better
                     * candidate for deletion */
                    if (bestkey == NULL || thisval < bestval) {
                        bestkey = thiskey;
                        bestval = thisval;
                    }
                }
            }

            /* Finally remove the selected key. */
            if (bestkey) {
                robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
                dbDelete(db,keyobj);
                server.stat_evictedkeys++;
                decrRefCount(keyobj);
                freed++;
            }
        }
        if (!freed) return; /* nothing to free... */
    }
}

/* =================================== Main! ================================ */

#ifdef __linux__
/* Read /proc/sys/vm/overcommit_memory; returns the integer setting,
 * or -1 if the file cannot be read. */
int linuxOvercommitMemoryValue(void) {
    FILE *fp = fopen("/proc/sys/vm/overcommit_memory","r");
    char buf[64];

    if (!fp) return -1;
    if (fgets(buf,64,fp) == NULL) {
        fclose(fp);
        return -1;
    }
    fclose(fp);

    return atoi(buf);
}

/* Warn at startup when overcommit_memory == 0, which can make
 * background saves (fork + copy-on-write) fail under memory pressure. */
void linuxOvercommitMemoryWarning(void) {
    if (linuxOvercommitMemoryValue() == 0) {
        redisLog(REDIS_WARNING,"WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.");
    }
}
#endif /* __linux__ */

void createPidFile(void) {
    /* Try to write the pid file in a best-effort way. */
    FILE *fp = fopen(server.pidfile,"w");
    if (fp) {
        fprintf(fp,"%d\n",getpid());
        fclose(fp);
    }
}

/* Classic double-free-of-terminal daemonization: fork, detach from the
 * controlling terminal with setsid(), and redirect stdio to /dev/null. */
void daemonize(void) {
    int fd;

    if (fork() != 0) exit(0); /* parent exits */
    setsid(); /* create a new session */

    /* Every output goes to /dev/null. If Redis is daemonized but
     * the 'logfile' is set to 'stdout' in the configuration file
     * it will not log at all. */
    if ((fd = open("/dev/null", O_RDWR, 0)) != -1) {
        dup2(fd, STDIN_FILENO);
        dup2(fd, STDOUT_FILENO);
        dup2(fd, STDERR_FILENO);
        if (fd > STDERR_FILENO) close(fd);
    }
}

/* Print version/git information and exit(0). */
void version() {
    printf("Redis server version %s (%s:%d)\n", REDIS_VERSION,
        redisGitSHA1(), atoi(redisGitDirty()) > 0);
    exit(0);
}

/* Print usage to stderr and exit(1). */
void usage() {
    fprintf(stderr,"Usage: ./redis-server [/path/to/redis.conf]\n");
    fprintf(stderr," ./redis-server - (read config from stdin)\n");
    exit(1);
}

/* Server entry point: parse the single optional config-file argument,
 * initialize, optionally daemonize, load the dataset (AOF preferred
 * over RDB when appendonly is on), then run the event loop forever. */
int main(int argc, char **argv) {
    time_t start;

    initServerConfig();
    if (argc == 2) {
        if (strcmp(argv[1], "-v") == 0 ||
            strcmp(argv[1], "--version") == 0) version();
        if (strcmp(argv[1], "--help") == 0) usage();
        /* Drop the built-in defaults before reading the user's config. */
        resetServerSaveParams();
        loadServerConfig(argv[1]);
    } else if ((argc > 2)) {
        usage();
    } else {
        redisLog(REDIS_WARNING,"Warning: no config file specified, using the default config. In order to specify a config file use 'redis-server /path/to/redis.conf'");
    }
    if (server.daemonize) daemonize();
    initServer();
    if (server.daemonize) createPidFile();
    redisLog(REDIS_NOTICE,"Server started, Redis version " REDIS_VERSION);
#ifdef __linux__
    linuxOvercommitMemoryWarning();
#endif
    start = time(NULL);
    if (server.appendonly) {
        if (loadAppendOnlyFile(server.appendfilename) == REDIS_OK)
            redisLog(REDIS_NOTICE,"DB loaded from append only file: %ld seconds",time(NULL)-start);
    } else {
        if (rdbLoad(server.dbfilename) == REDIS_OK)
            redisLog(REDIS_NOTICE,"DB loaded from disk: %ld seconds",time(NULL)-start);
    }
    if (server.ipfd > 0)
        redisLog(REDIS_NOTICE,"The server is now ready to accept connections on port %d", server.port);
    if (server.sofd > 0)
        redisLog(REDIS_NOTICE,"The server is now ready to accept connections at %s", server.unixsocket);
    aeSetBeforeSleepProc(server.el,beforeSleep);
    aeMain(server.el);
    aeDeleteEventLoop(server.el);
    return 0;
}

/* ============================= Backtrace support ========================= */

#ifdef HAVE_BACKTRACE
/* Extract the faulting instruction pointer from a ucontext_t, with one
 * branch per supported platform/ABI; returns NULL when unsupported. */
void *getMcontextEip(ucontext_t *uc) {
#if defined(__FreeBSD__)
    return (void*) uc->uc_mcontext.mc_eip;
#elif defined(__dietlibc__)
    return (void*) uc->uc_mcontext.eip;
#elif defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6)
  #if __x86_64__
    return (void*) uc->uc_mcontext->__ss.__rip;
  #else
    return (void*) uc->uc_mcontext->__ss.__eip;
  #endif
#elif defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6)
  #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__)
    return (void*) uc->uc_mcontext->__ss.__rip;
  #else
    return (void*) uc->uc_mcontext->__ss.__eip;
  #endif
#elif defined(__i386__)
    return (void*) uc->uc_mcontext.gregs[14]; /* Linux 32 */
#elif defined(__X86_64__) || defined(__x86_64__)
    return (void*) uc->uc_mcontext.gregs[16]; /* Linux 64 */
#elif defined(__ia64__) /* Linux IA64 */
    return (void*) uc->uc_mcontext.sc_ip;
#else
    return NULL;
#endif
}

/* Fatal-signal handler: log version, INFO output and a backtrace,
 * then re-raise the signal with the default disposition so the core
 * dump (if enabled) is produced. Continues on the next lines. */
void segvHandler(int sig, siginfo_t *info, void *secret) {
    void *trace[100];
    char
**messages = NULL; int i, trace_size = 0; ucontext_t *uc = (ucontext_t*) secret; sds infostring; struct sigaction act; REDIS_NOTUSED(info); redisLog(REDIS_WARNING, "======= Ooops! Redis %s got signal: -%d- =======", REDIS_VERSION, sig); infostring = genRedisInfoString(); redisLog(REDIS_WARNING, "%s",infostring); /* It's not safe to sdsfree() the returned string under memory * corruption conditions. Let it leak as we are going to abort */ trace_size = backtrace(trace, 100); /* overwrite sigaction with caller's address */ if (getMcontextEip(uc) != NULL) { trace[1] = getMcontextEip(uc); } messages = backtrace_symbols(trace, trace_size); for (i=1; i<trace_size; ++i) redisLog(REDIS_WARNING,"%s", messages[i]); /* free(messages); Don't call free() with possibly corrupted memory. */ if (server.daemonize) unlink(server.pidfile); /* Make sure we exit with the right signal at the end. So for instance * the core will be dumped if enabled. */ sigemptyset (&act.sa_mask); /* When the SA_SIGINFO flag is set in sa_flags then sa_sigaction * is used. Otherwise, sa_handler is used */ act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND; act.sa_handler = SIG_DFL; sigaction (sig, &act, NULL); kill(getpid(),sig); } void sigtermHandler(int sig) { REDIS_NOTUSED(sig); redisLog(REDIS_WARNING,"SIGTERM received, scheduling shutting down..."); server.shutdown_asap = 1; } void setupSigSegvAction(void) { struct sigaction act; sigemptyset (&act.sa_mask); /* When the SA_SIGINFO flag is set in sa_flags then sa_sigaction * is used. 
Otherwise, sa_handler is used */ act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND | SA_SIGINFO; act.sa_sigaction = segvHandler; sigaction (SIGSEGV, &act, NULL); sigaction (SIGBUS, &act, NULL); sigaction (SIGFPE, &act, NULL); sigaction (SIGILL, &act, NULL); sigaction (SIGBUS, &act, NULL); act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND; act.sa_handler = sigtermHandler; sigaction (SIGTERM, &act, NULL); return; } #else /* HAVE_BACKTRACE */ void setupSigSegvAction(void) { } #endif /* HAVE_BACKTRACE */ /* The End */
./CrossVul/dataset_final_sorted/CWE-20/c/good_5533_2
crossvul-cpp_data_good_3581_2
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include "cred-internals.h"

static void exit_mm(struct task_struct * tsk);

/* Remove the task from all pid hashes and sibling/thread lists.
 * Caller holds tasklist_lock for writing (see __exit_signal). */
static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	list_del_init(&p->sibling);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, task_utime(tsk));
		sig->stime = cputime_add(sig->stime, task_stime(tsk));
		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->inblock += task_io_get_inblock(tsk);
		sig->oublock += task_io_get_oublock(tsk);
		task_io_accounting_add(&sig->ioac, &tsk->ioac);
		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	if (sig) {
		/* sig is non-NULL only when we were the last thread in the
		 * group (see the "Marker for below" logic above). */
		flush_sigqueue(&sig->shared_pending);
		taskstats_tgid_free(sig);
		/*
		 * Make sure ->signal can't go away under rq->lock,
		 * see account_group_exec_runtime().
		 */
		task_rq_unlock_wait(tsk);
		__cleanup_signal(sig);
	}
}

/* RCU callback: final put of the task_struct once all RCU readers are
 * done with it (queued via call_rcu() in release_task() below). */
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

#ifdef CONFIG_PERF_EVENTS
	WARN_ON_ONCE(tsk->perf_event_ctxp);
#endif
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

/* Reap a dead task: detach it from all lists, free its signal state and
 * schedule the task_struct itself for RCU-delayed freeing.  May loop to
 * also reap a zombie group leader that became self-reaping. */
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	tracehook_prepare_release_task(p);
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials */
	atomic_dec(&__task_cred(p)->user->processes);

	proc_flush_task(p);

	write_lock_irq(&tasklist_lock);
	tracehook_finish_release_task(p);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(task_detached(leader));
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = task_detached(leader);

		/*
		 * This maintains the invariant that release_task()
		 * only runs on a task in EXIT_DEAD, just for sanity.
		 */
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 *
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

	p = pid_task(pgrp, PIDTYPE_PGID);
	if (p == NULL)
		p = pid_task(pgrp, PIDTYPE_PID);
	if (p != NULL)
		sid = task_session(p);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

/* Returns non-zero when any task in the process group is stopped. */
static int has_stopped_jobs(struct pid *pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (!task_is_stopped(p))
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.
 (POSIX 3.2.2.2)
 */
static void kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to kthreadd so it
 * isn't in the way of other processes and is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	current->real_parent = current->parent = kthreadd_task;
	list_move_tail(&current->sibling, &current->real_parent->children);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (task_nice(current) < 0)
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));

	atomic_inc(&init_cred.usage);
	commit_creds(&init_cred);
	write_unlock_irq(&tasklist_lock);
}

/* Change the session and process-group ids of the current group leader.
 * Caller holds tasklist_lock for writing (see set_special_pids). */
void __set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

static void set_special_pids(struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(pid);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they allow a certain signal.
 * Must not be used if kthread was cloned with CLONE_SIGHAND.
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	/* This is only needed for daemonize()'ed kthreads */
	sigdelset(&current->blocked, sig);
	/*
	 * Kernel threads handle their own signals. Let the signal code
	 * know it'll be handled, so that they don't get converted to
	 * SIGKILL or just silently dropped.
	 */
	current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

/* Reverse of allow_signal(): ignore the given signal again. */
int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */
void daemonize(const char *name, ...)
{
	va_list args;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);
	/*
	 * We don't want to have TIF_FREEZE set if the system-wide hibernation
	 * or suspend transition begins right now.
	 */
	current->flags |= (PF_NOFREEZE | PF_KTHREAD);

	if (current->nsproxy != &init_nsproxy) {
		get_nsproxy(&init_nsproxy);
		switch_task_namespaces(current, &init_nsproxy);
	}
	set_special_pids(&init_struct_pid);
	proc_clear_tty(current);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	daemonize_fs_struct();
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}

EXPORT_SYMBOL(daemonize);

/* Close every open fd in the table; called from put_files_struct() when
 * the last reference to the files_struct goes away. */
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}

void reset_files_struct(struct files_struct *files)
{
	struct task_struct *tsk = current;
	struct files_struct *old;

	old = tsk->files;
	task_lock(tsk);
	tsk->files = files;
	task_unlock(tsk);
	put_files_struct(old);
}

/* Detach and drop the task's files_struct reference. */
void exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

#ifdef CONFIG_MM_OWNER
/*
 * Task p is exiting and it owned mm, lets find a new owner for it
 */
static inline int
mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
{
	/*
	 * If there are other users of the mm and the owner (us) is exiting
	 * we need to find a new owner to take on the responsibility.
	 */
	if (atomic_read(&mm->mm_users) <= 1)
		return 0;
	if (mm->owner != p)
		return 0;
	return 1;
}

void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	if (!mm_need_new_owner(mm, p))
		return;

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else. We should not get
	 * here often
	 */
	do_each_thread(g, c) {
		if (c->mm == mm)
			goto assign_new_owner;
	} while_each_thread(g, c);

	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MM_OWNER */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;
		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the child reaper process (ie "init") in our pid
 * space.
 */
static struct task_struct *find_new_reaper(struct task_struct *father)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *thread;

	/* Prefer another live thread in the same group. */
	thread = father;
	while_each_thread(father, thread) {
		if (thread->flags & PF_EXITING)
			continue;
		if (unlikely(pid_ns->child_reaper == father))
			pid_ns->child_reaper = thread;
		return thread;
	}

	if (unlikely(pid_ns->child_reaper == father)) {
		write_unlock_irq(&tasklist_lock);
		if (unlikely(pid_ns == &init_pid_ns))
			panic("Attempted to kill init!");

		zap_pid_ns_processes(pid_ns);
		write_lock_irq(&tasklist_lock);
		/*
		 * We can not clear ->child_reaper or leave it alone.
		 * There may by stealth EXIT_DEAD tasks on ->children,
		 * forget_original_parent() must move them somewhere.
		 */
		pid_ns->child_reaper = init_pid_ns.child_reaper;
	}

	return pid_ns->child_reaper;
}

/*
* Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_thread(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (p->pdeath_signal)
		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);

	list_move_tail(&p->sibling, &p->real_parent->children);

	if (task_detached(p))
		return;
	/*
	 * If this is a threaded reparent there is no need to
	 * notify anyone anything has happened.
	 */
	if (same_thread_group(p->real_parent, father))
		return;

	/* We don't want people slaying init.  */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!task_ptrace(p) &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		do_notify_parent(p, p->exit_signal);
		if (task_detached(p)) {
			p->exit_state = EXIT_DEAD;
			list_move_tail(&p->sibling, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/* Hand all of father's children over to the new reaper, then reap any
 * children that became EXIT_DEAD during the reparenting. */
static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper;
	LIST_HEAD(dead_children);

	exit_ptrace(father);

	write_lock_irq(&tasklist_lock);
	reaper = find_new_reaper(father);

	list_for_each_entry_safe(p, n, &father->children, sibling) {
		p->real_parent = reaper;
		if (p->parent == father) {
			BUG_ON(task_ptrace(p));
			p->parent = p->real_parent;
		}
		reparent_thread(father, p, &dead_children);
	}
	write_unlock_irq(&tasklist_lock);

	BUG_ON(!list_empty(&father->children));

	list_for_each_entry_safe(p, n, &dead_children, sibling) {
		list_del_init(&p->sibling);
		release_task(p);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int signal;
	void *cookie;

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */
	forget_original_parent(tsk);
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
	     tsk->self_exec_id != tsk->parent_exec_id))
		tsk->exit_signal = SIGCHLD;

	signal = tracehook_notify_death(tsk, &cookie, group_dead);
	if (signal >= 0)
		signal = do_notify_parent(tsk, signal);

	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;

	/* mt-exec, de_thread() is waiting for us */
	if (thread_group_leader(tsk) &&
	    tsk->signal->group_exit_task &&
	    tsk->signal->notify_count < 0)
		wake_up_process(tsk->signal->group_exit_task);

	write_unlock_irq(&tasklist_lock);

	tracehook_report_death(tsk, signal, cookie, group_dead);

	/* If the process is dead, release it - nobody will wait for it */
	if (signal == DEATH_REAP)
		release_task(tsk);
}

#ifdef CONFIG_DEBUG_STACK_USAGE
/* Track and report the deepest kernel stack usage seen so far. */
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
				"left\n", current->comm, free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

/* Terminate the current task: tear down every resource class in a fixed
 * order, notify relatives, then schedule away forever as TASK_DEAD. */
NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	tracehook_report_exit(&code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		printk(KERN_ALERT
			"Fixing recursive fault but reboot is needed!\n");
		/*
		 * We can do this unlocked here. The futex code uses
		 * this flag just to verify whether the pi state
		 * cleanup has been done or not. In the worst case it
		 * loops once more. We pretend that the cleanup was
		 * done as there is no way to return. Either the
		 * OWNER_DIED bit is set by now or we push the blocked
		 * task into the wait for ever nirwana as well.
		 */
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_irq_thread();

	exit_signals(tsk);  /* sets PF_EXITING */

	/*
	 * tsk->flags are checked in the futex code to protect against
	 * an exiting task cleaning up the robust pi futexes.
	 */
	smp_mb();
	spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
				current->comm, task_pid_nr(current),
				preempt_count());

	acct_update_integrals(tsk);

	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	if (unlikely(tsk->audit_context))
		audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	check_stack_usage();
	exit_thread();
	cgroup_exit(tsk, 1);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(task_thread_info(tsk)->exec_domain->module);

	proc_exit_connector(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 */
	perf_event_exit_task(tsk);

	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held(tsk);
	/*
	 * We can do this unlocked here. The futex code uses this flag
	 * just to verify whether the pi state cleanup has been done
	 * or not. In the worst case it loops once more.
	 */
	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	validate_creds_for_do_exit(tsk);

	preempt_disable();
	exit_rcu();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
	schedule();
	BUG();
	/* Avoid "noreturn function does return".  */
	for (;;)
		cpu_relax();	/* For when BUG is null */
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;
		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
*/ SYSCALL_DEFINE1(exit_group, int, error_code) { do_group_exit((error_code & 0xff) << 8); /* NOTREACHED */ return 0; } struct wait_opts { enum pid_type wo_type; int wo_flags; struct pid *wo_pid; struct siginfo __user *wo_info; int __user *wo_stat; struct rusage __user *wo_rusage; wait_queue_t child_wait; int notask_error; }; static inline struct pid *task_pid_type(struct task_struct *task, enum pid_type type) { if (type != PIDTYPE_PID) task = task->group_leader; return task->pids[type].pid; } static int eligible_pid(struct wait_opts *wo, struct task_struct *p) { return wo->wo_type == PIDTYPE_MAX || task_pid_type(p, wo->wo_type) == wo->wo_pid; } static int eligible_child(struct wait_opts *wo, struct task_struct *p) { if (!eligible_pid(wo, p)) return 0; /* Wait for all children (clone and not) if __WALL is set; * otherwise, wait for clone children *only* if __WCLONE is * set; otherwise, wait for non-clone children *only*. (Note: * A "clone" child here is one that reports to its parent * using a signal other than SIGCHLD.) */ if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) && !(wo->wo_flags & __WALL)) return 0; return 1; } static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p, pid_t pid, uid_t uid, int why, int status) { struct siginfo __user *infop; int retval = wo->wo_rusage ? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0; put_task_struct(p); infop = wo->wo_info; if (infop) { if (!retval) retval = put_user(SIGCHLD, &infop->si_signo); if (!retval) retval = put_user(0, &infop->si_errno); if (!retval) retval = put_user((short)why, &infop->si_code); if (!retval) retval = put_user(pid, &infop->si_pid); if (!retval) retval = put_user(uid, &infop->si_uid); if (!retval) retval = put_user(status, &infop->si_status); } if (!retval) retval = pid; return retval; } /* * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold * read_lock(&tasklist_lock) on entry. 
   If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	unsigned long state;
	int retval, status, traced;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = __task_cred(p)->uid;
	struct siginfo __user *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		/* Report without reaping: decode exit_code into why/status. */
		int exit_code = p->exit_code;
		int why, status;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(wo, p, pid, uid, why, status);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}

	traced = ptrace_reparented(p);
	/*
	 * It can be ptraced but not reparented, check
	 * !task_detached() to filter out sub-threads.
	 */
	if (likely(!traced) && likely(!task_detached(p))) {
		struct signal_struct *psig;
		struct signal_struct *sig;
		unsigned long maxrss;

		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->real_parent->sighand->siglock);
		psig = p->real_parent->signal;
		sig = p->signal;
		/* Fold the dead child's own + its children's times into parent. */
		psig->cutime =
			cputime_add(psig->cutime,
			cputime_add(p->utime,
			cputime_add(sig->utime,
				    sig->cutime)));
		psig->cstime =
			cputime_add(psig->cstime,
			cputime_add(p->stime,
			cputime_add(sig->stime,
				    sig->cstime)));
		psig->cgtime =
			cputime_add(psig->cgtime,
			cputime_add(p->gtime,
			cputime_add(sig->gtime,
				    sig->cgtime)));
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		spin_unlock_irq(&p->real_parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && wo->wo_stat)
		retval = put_user(status, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (traced) {
		write_lock_irq(&tasklist_lock);
		/* We dropped tasklist, ptracer could die and untrace */
		ptrace_unlink(p);
		/*
		 * If this is not a detached task, notify the parent.
		 * If it's still not detached after that, don't release
		 * it now.
		 */
		if (!task_detached(p)) {
			do_notify_parent(p, p->exit_signal);
			if (!task_detached(p)) {
				/* Real parent will reap it; back to zombie. */
				p->exit_state = EXIT_ZOMBIE;
				p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);

	return retval;
}

/*
 * Return a pointer to the relevant stop code for @p, or NULL if it is
 * not in a (ptrace-visible) stopped state.
 */
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_stopped_or_traced(p))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct siginfo __user *infop;
	int retval, exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;	/* consume the stop code unless WNOWAIT */

	/* don't need the RCU readlock here as we're holding a spinlock */
	uid = __task_cred(p)->uid;
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);

	if (unlikely(wo->wo_flags & WNOWAIT))
		return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	if (!retval && wo->wo_stat)
		retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = __task_cred(p)->uid;
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!wo->wo_info) {
		/* wait4()-style: 0xffff is the "continued" status word. */
		retval = wo->wo_rusage
			? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
		put_task_struct(p);
		if (!retval && wo->wo_stat)
			retval = put_user(0xffff, wo->wo_stat);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(wo, p, pid, uid,
					     CLD_CONTINUED, SIGCONT);
		BUG_ON(retval == 0);
	}

	return retval;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	int ret = eligible_child(wo, p);
	if (!ret)
		return ret;

	ret = security_task_wait(p);
	if (unlikely(ret < 0)) {
		/*
		 * If we have not yet seen any eligible child,
		 * then let this error code replace -ECHILD.
		 * A permission error will give the user a clue
		 * to look for security policy problems, rather
		 * than for mysterious wait bugs.
		 */
		if (wo->notask_error)
			wo->notask_error = ret;
		return 0;
	}

	if (likely(!ptrace) && unlikely(task_ptrace(p))) {
		/*
		 * This child is hidden by ptrace.
		 * We aren't allowed to see it now, but eventually we will.
		 */
		wo->notask_error = 0;
		return 0;
	}

	if (p->exit_state == EXIT_DEAD)
		return 0;

	/*
	 * We don't reap group leaders with subthreads.
	 */
	if (p->exit_state == EXIT_ZOMBIE && !delay_group_leader(p))
		return wait_task_zombie(wo, p);

	/*
	 * It's stopped or running now, so it might
	 * later continue, exit, or stop again.
	 */
	wo->notask_error = 0;

	if (task_stopped_code(p, ptrace))
		return wait_task_stopped(wo, ptrace, p);

	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or another error from security_task_wait(), or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		/*
		 * Do not consider detached threads.
		 */
		if (!task_detached(p)) {
			int ret = wait_consider_task(wo, 0, p);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Same as do_wait_thread(), but over @tsk's ptrace-attached tracees. */
static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Wake callback on the parent's wait_chldexit queue: only wake waiters
 * whose pid selection matches the child (@key) and, with __WNOTHREAD,
 * only the child's own parent.
 */
static int child_wait_callback(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

/* Wake a parent sleeping in do_wait() on behalf of child @p. */
void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}

/*
 * Core of the wait-family syscalls: scan children (and tracees) of every
 * thread in the caller's group, sleeping until something reportable shows
 * up unless WNOHANG is set.
 */
static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our critiera just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		/* Nothing reportable yet: sleep and rescan on wakeup. */
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

/*
 * waitid(2): validate options, translate (which, upid) into a pid/type
 * selection, and run do_wait() with siginfo-style reporting.
 */
SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_stat	= NULL;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);

	if (ret > 0) {
		ret = 0;
	} else if (infop) {
		/*
		 * For a WNOHANG return, clear out all the fields
		 * we would set so the user can easily tell the
		 * difference.
		 */
		if (!ret)
			ret = put_user(0, &infop->si_signo);
		if (!ret)
			ret = put_user(0, &infop->si_errno);
		if (!ret)
			ret = put_user(0, &infop->si_code);
		if (!ret)
			ret = put_user(0, &infop->si_pid);
		if (!ret)
			ret = put_user(0, &infop->si_uid);
		if (!ret)
			ret = put_user(0, &infop->si_status);
	}

	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(5, ret, which, upid, infop, options, ru);
	return ret;
}

/*
 * wait4(2): classic interface.  upid encodes the selection: -1 = any
 * child, < -1 = process group -upid, 0 = caller's process group,
 * > 0 = that specific pid.  WEXITED is implied.
 */
SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= stat_addr;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);

	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(4, ret, upid, stat_addr, options, ru);
	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */

SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif
./CrossVul/dataset_final_sorted/CWE-20/c/good_3581_2
crossvul-cpp_data_good_5511_0
/* * Internet Control Message Protocol (ICMPv6) * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on net/ipv4/icmp.c * * RFC 1885 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Changes: * * Andi Kleen : exception handling * Andi Kleen add rate limits. never reply to a icmp. * add more length checks and other fixes. * yoshfuji : ensure to sent parameter problem for * fragments. * YOSHIFUJI Hideaki @USAGI: added sysctl for icmp rate limit. * Randy Dunlap and * YOSHIFUJI Hideaki @USAGI: Per-interface statistics support * Kazunori MIYAZAWA @USAGI: change output process to use ip6_append_data */ #define pr_fmt(fmt) "IPv6: " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/netfilter.h> #include <linux/slab.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/icmpv6.h> #include <net/ip.h> #include <net/sock.h> #include <net/ipv6.h> #include <net/ip6_checksum.h> #include <net/ping.h> #include <net/protocol.h> #include <net/raw.h> #include <net/rawv6.h> #include <net/transp_v6.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/inet_common.h> #include <net/dsfield.h> #include <net/l3mdev.h> #include <asm/uaccess.h> /* * The ICMP socket(s). This is the most convenient way to flow control * our ICMP output as well as maintain a clean interface throughout * all layers. All Socketless IP sends will soon be gone. * * On SMP we have one ICMP socket per-cpu. 
 */
/* Return this CPU's per-netns ICMPv6 transmit socket. */
static inline struct sock *icmpv6_sk(struct net *net)
{
	return net->ipv6.icmp_sk[smp_processor_id()];
}

/*
 * Error handler for the ICMPv6 protocol itself: propagate PMTU updates
 * and redirects, and feed echo-request errors to the ping socket layer.
 */
static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
	struct icmp6hdr *icmp6 = (struct icmp6hdr *) (skb->data + offset);
	struct net *net = dev_net(skb->dev);

	if (type == ICMPV6_PKT_TOOBIG)
		ip6_update_pmtu(skb, net, info, 0, 0);
	else if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0);

	if (!(type & ICMPV6_INFOMSG_MASK))
		if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
			ping_err(skb, offset, ntohl(info));
}

static int icmpv6_rcv(struct sk_buff *skb);

static const struct inet6_protocol icmpv6_protocol = {
	.handler	=	icmpv6_rcv,
	.err_handler	=	icmpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/*
 * Grab this CPU's ICMPv6 socket with BHs disabled.  Returns NULL if the
 * socket lock is already held on this CPU (re-entry from the output path);
 * the caller must then give up rather than deadlock.
 */
static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
	struct sock *sk;

	local_bh_disable();

	sk = icmpv6_sk(net);
	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path (f.e. SIT or
		 * ip6ip6 tunnel) signals dst_link_failure() for an
		 * outgoing ICMP6 packet.
		 */
		local_bh_enable();
		return NULL;
	}
	return sk;
}

/* Counterpart of icmpv6_xmit_lock(): drop the socket lock, re-enable BHs. */
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
	spin_unlock_bh(&sk->sk_lock.slock);
}

/*
 * Figure out, may we reply to this packet with icmp error.
 *
 * We do not reply, if:
 *	- it was icmp error message.
 *	- it is truncated, so that it is known, that protocol is ICMPV6
 *	  (i.e. in the middle of some exthdr)
 *
 *	--ANK (980726)
 */
static bool is_ineligible(const struct sk_buff *skb)
{
	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
	int len = skb->len - ptr;
	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	if (len < 0)
		return true;

	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr, &frag_off);
	if (ptr < 0)
		return false;
	if (nexthdr == IPPROTO_ICMPV6) {
		u8 _type, *tp;
		tp = skb_header_pointer(skb,
			ptr+offsetof(struct icmp6hdr, icmp6_type),
			sizeof(_type), &_type);
		/* No readable type byte, or an ICMPv6 error message. */
		if (!tp || !(*tp & ICMPV6_INFOMSG_MASK))
			return true;
	}
	return false;
}

/*
 * Check the ICMP output rate limit
 */
static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
			       struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
	struct dst_entry *dst;
	bool res = false;

	/* Informational messages are not limited. */
	if (type & ICMPV6_INFOMSG_MASK)
		return true;

	/* Do not limit pmtu discovery, it would break it. */
	if (type == ICMPV6_PKT_TOOBIG)
		return true;

	/*
	 * Look up the output route.
	 * XXX: perhaps the expire for routing entries cloned by
	 * this lookup should be more aggressive (not longer than timeout).
	 */
	dst = ip6_route_output(net, sk, fl6);
	if (dst->error) {
		IP6_INC_STATS(net, ip6_dst_idev(dst),
			      IPSTATS_MIB_OUTNOROUTES);
	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
		/* Loopback traffic is never rate-limited. */
		res = true;
	} else {
		struct rt6_info *rt = (struct rt6_info *)dst;
		int tmo = net->ipv6.sysctl.icmpv6_time;

		/* Give more bandwidth to wider prefixes.
		 */
		if (rt->rt6i_dst.plen < 128)
			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);

		/* Global limit first, then the per-peer token bucket. */
		if (icmp_global_allow()) {
			struct inet_peer *peer;

			peer = inet_getpeer_v6(net->ipv6.peers,
					       &fl6->daddr, 1);
			res = inet_peer_xrlim_allow(peer, tmo);
			if (peer)
				inet_putpeer(peer);
		}
	}
	dst_release(dst);
	return res;
}

/*
 * an inline helper for the "simple" if statement below
 * checks if parameter problem report is caused by an
 * unrecognized IPv6 option that has the Option Type
 * highest-order two bits set to 10
 */
static bool opt_unrec(struct sk_buff *skb, __u32 offset)
{
	u8 _optval, *op;

	offset += skb_network_offset(skb);
	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
	if (!op)
		return true;
	return (*op & 0xC0) == 0x80;
}

/*
 * Finish an ICMPv6 message queued on @sk: fill in the header from @thdr,
 * compute the checksum over all queued fragments, and transmit.
 */
int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
			       struct icmp6hdr *thdr, int len)
{
	struct sk_buff *skb;
	struct icmp6hdr *icmp6h;
	int err = 0;

	skb = skb_peek(&sk->sk_write_queue);
	if (!skb)
		goto out;

	icmp6h = icmp6_hdr(skb);
	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
	icmp6h->icmp6_cksum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		skb->csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), skb->csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      skb->csum);
	} else {
		/* Multiple fragments: fold every fragment's checksum. */
		__wsum tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);
		}

		tmp_csum = csum_partial(icmp6h,
					sizeof(struct icmp6hdr), tmp_csum);
		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl6->saddr,
						      &fl6->daddr,
						      len, fl6->flowi6_proto,
						      tmp_csum);
	}
	ip6_push_pending_frames(sk);
out:
	return err;
}

/* Source descriptor handed to ip6_append_data() via icmpv6_getfrag(). */
struct icmpv6_msg {
	struct sk_buff	*skb;		/* packet whose payload we copy from */
	int		offset;		/* start offset within that packet */
	uint8_t		type;		/* ICMPv6 type being generated */
};

/*
 * getfrag callback: copy @len bytes of the original packet into the
 * outgoing fragment while accumulating the checksum.
 */
static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
	struct sk_buff *org_skb = msg->skb;
	__wsum csum = 0;

	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
				      to, len, csum);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	if
 (!(msg->type & ICMPV6_INFOMSG_MASK))
		nf_ct_attach(skb, org_skb);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6_MIP6)
/*
 * Mobile IPv6: if the packet carries a Home Address destination option,
 * swap the care-of address in the IPv6 source with the home address so
 * the ICMP error is built against the home address.
 */
static void mip6_addr_swap(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6_destopt_hao *hao;
	struct in6_addr tmp;
	int off;

	if (opt->dsthao) {
		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
		if (likely(off >= 0)) {
			hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + off);
			tmp = iph->saddr;
			iph->saddr = hao->addr;
			hao->addr = tmp;
		}
	}
}
#else
static inline void mip6_addr_swap(struct sk_buff *skb) {}
#endif

/*
 * Route lookup for an outgoing ICMPv6 error, including the XFRM (IPsec)
 * policy pass and, on -EPERM, a relookup against the reversed flow of
 * the offending packet.  Returns a dst or an ERR_PTR().
 */
static struct dst_entry *icmpv6_route_lookup(struct net *net,
					     struct sk_buff *skb,
					     struct sock *sk,
					     struct flowi6 *fl6)
{
	struct dst_entry *dst, *dst2;
	struct flowi6 fl2;
	int err;

	err = ip6_dst_lookup(net, sk, &dst, fl6);
	if (err)
		return ERR_PTR(err);

	/*
	 * We won't send icmp if the destination is known
	 * anycast.
	 */
	if (ipv6_anycast_destination(dst, &fl6->daddr)) {
		net_dbg_ratelimited("icmp6_send: acast source\n");
		dst_release(dst);
		return ERR_PTR(-EINVAL);
	}

	/* No need to clone since we're just using its address.
	 */
	dst2 = dst;

	dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
	if (!IS_ERR(dst)) {
		if (dst != dst2)
			return dst;
	} else {
		if (PTR_ERR(dst) == -EPERM)
			dst = NULL;	/* policy denied: try the reverse flow */
		else
			return dst;
	}

	err = xfrm_decode_session_reverse(skb, flowi6_to_flowi(&fl2), AF_INET6);
	if (err)
		goto relookup_failed;

	err = ip6_dst_lookup(net, sk, &dst2, &fl2);
	if (err)
		goto relookup_failed;

	dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
	if (!IS_ERR(dst2)) {
		dst_release(dst);
		dst = dst2;
	} else {
		err = PTR_ERR(dst2);
		if (err == -EPERM) {
			dst_release(dst);
			return dst2;
		} else
			goto relookup_failed;
	}

relookup_failed:
	if (dst)
		return dst;
	return ERR_PTR(err);
}

/*
 * Send an ICMP message in response to a packet in error
 */
static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
		       const struct in6_addr *force_saddr)
{
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *idev = NULL;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct sock *sk;
	struct ipv6_pinfo *np;
	const struct in6_addr *saddr = NULL;
	struct dst_entry *dst;
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	struct sockcm_cookie sockc_unused = {0};
	struct ipcm6_cookie ipc6;
	int iif = 0;
	int addr_type = 0;
	int len;
	int err = 0;
	u32 mark = IP6_REPLY_MARK(net, skb->mark);

	/* Sanity: the IPv6 header of the offending packet must be in skb. */
	if ((u8 *)hdr < skb->head ||
	    (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
		return;

	/*
	 * Make sure we respect the rules
	 * i.e. RFC 1885 2.4(e)
	 * Rule (e.1) is enforced by not using icmp6_send
	 * in any code that processes icmp errors.
	 */
	addr_type = ipv6_addr_type(&hdr->daddr);

	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
	    ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
		saddr = &hdr->daddr;

	/*
	 * Dest addr check
	 */

	if (addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST) {
		if (type != ICMPV6_PKT_TOOBIG &&
		    !(type == ICMPV6_PARAMPROB &&
		      code == ICMPV6_UNK_OPTION &&
		      (opt_unrec(skb, info))))
			return;

		saddr = NULL;
	}

	addr_type = ipv6_addr_type(&hdr->saddr);

	/*
	 * Source addr check
	 */

	if (__ipv6_addr_needs_scope_id(addr_type))
		iif = skb->dev->ifindex;
	else {
		dst = skb_dst(skb);
		iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
	}

	/*
	 * Must not send error if the source does not uniquely
	 * identify a single node (RFC2463 Section 2.4).
	 * We check unspecified / multicast addresses here,
	 * and anycast addresses will be checked later.
	 */
	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
		net_dbg_ratelimited("icmp6_send: addr_any/mcast source [%pI6c > %pI6c]\n",
				    &hdr->saddr, &hdr->daddr);
		return;
	}

	/*
	 * Never answer to a ICMP packet.
	 */
	if (is_ineligible(skb)) {
		net_dbg_ratelimited("icmp6_send: no reply to icmp error [%pI6c > %pI6c]\n",
				    &hdr->saddr, &hdr->daddr);
		return;
	}

	mip6_addr_swap(skb);

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_ICMPV6;
	fl6.daddr = hdr->saddr;
	if (force_saddr)
		saddr = force_saddr;
	if (saddr)
		fl6.saddr = *saddr;
	fl6.flowi6_mark = mark;
	fl6.flowi6_oif = iif;
	fl6.fl6_icmp_type = type;
	fl6.fl6_icmp_code = code;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	sk = icmpv6_xmit_lock(net);
	if (!sk)
		return;
	sk->sk_mark = mark;
	np = inet6_sk(sk);

	if (!icmpv6_xrlim_allow(sk, type, &fl6))
		goto out;

	tmp_hdr.icmp6_type = type;
	tmp_hdr.icmp6_code = code;
	tmp_hdr.icmp6_cksum = 0;
	tmp_hdr.icmp6_pointer = htonl(info);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	ipc6.tclass = np->tclass;
	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
	if (IS_ERR(dst))
		goto out;

	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
	ipc6.dontfrag = np->dontfrag;
	ipc6.opt = NULL;

	msg.skb = skb;
	msg.offset = skb_network_offset(skb);
	msg.type = type;

	/* Clamp the quoted payload so the reply fits in the minimum MTU. */
	len = skb->len - msg.offset;
	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
	if (len < 0) {
		net_dbg_ratelimited("icmp: len problem [%pI6c > %pI6c]\n",
				    &hdr->saddr, &hdr->daddr);
		goto out_dst_release;
	}

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);

	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
			      len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      MSG_DONTWAIT, &sockc_unused);
	if (err) {
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
	} else {
		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
						 len + sizeof(struct icmp6hdr));
	}
	rcu_read_unlock();
out_dst_release:
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
}

/* Slightly more convenient version of icmp6_send.
 */
void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
	icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL);
	kfree_skb(skb);
}

/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH
 * if sufficient data bytes are available
 * @nhs is the size of the tunnel header(s) :
 *  Either an IPv4 header for SIT encap
 *         an IPv4 header + GRE header for GRE encap
 */
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
			       unsigned int data_len)
{
	struct in6_addr temp_saddr;
	struct rt6_info *rt;
	struct sk_buff *skb2;
	u32 info = 0;

	if (!pskb_may_pull(skb, nhs + sizeof(struct ipv6hdr) + 8))
		return 1;

	/* RFC 4884 (partial) support for ICMP extensions */
	if (data_len < 128 || (data_len & 7) || skb->len < data_len)
		data_len = 0;

	skb2 = data_len ? skb_copy(skb, GFP_ATOMIC) : skb_clone(skb, GFP_ATOMIC);

	if (!skb2)
		return 1;

	skb_dst_drop(skb2);
	skb_pull(skb2, nhs);
	skb_reset_network_header(skb2);

	rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0);

	if (rt && rt->dst.dev)
		skb2->dev = rt->dst.dev;

	/* Use the outer IPv4 source, v4-mapped, as the ICMPv6 source. */
	ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &temp_saddr);

	if (data_len) {
		/* RFC 4884 (partial) support :
		 * insert 0 padding at the end, before the extensions
		 */
		__skb_push(skb2, nhs);
		skb_reset_network_header(skb2);
		memmove(skb2->data, skb2->data + nhs, data_len - nhs);
		memset(skb2->data + data_len - nhs, 0, nhs);

		/* RFC 4884 4.5 : Length is measured in 64-bit words,
		 * and stored in reserved[0]
		 */
		info = (data_len/8) << 24;
	}

	if (type == ICMP_TIME_EXCEEDED)
		icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			   info, &temp_saddr);
	else
		icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH,
			   info, &temp_saddr);
	if (rt)
		ip6_rt_put(rt);

	kfree_skb(skb2);

	return 0;
}
EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach);

/*
 * Answer an incoming Echo Request with an Echo Reply built from the
 * request's payload, over this CPU's ICMPv6 socket.
 */
static void icmpv6_echo_reply(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	struct inet6_dev *idev;
	struct ipv6_pinfo *np;
	const struct in6_addr *saddr = NULL;
	struct icmp6hdr *icmph = icmp6_hdr(skb);
	struct icmp6hdr tmp_hdr;
	struct flowi6 fl6;
	struct icmpv6_msg msg;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int err = 0;
	u32 mark = IP6_REPLY_MARK(net, skb->mark);
	struct sockcm_cookie sockc_unused = {0};

	saddr = &ipv6_hdr(skb)->daddr;

	/* Only echo the request's destination back as our source when it
	 * was unicast (or anycast, if the sysctl allows it). */
	if (!ipv6_unicast_destination(skb) &&
	    !(net->ipv6.sysctl.anycast_src_echo_reply &&
	      ipv6_anycast_destination(skb_dst(skb), saddr)))
		saddr = NULL;

	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_ICMPV6;
	fl6.daddr = ipv6_hdr(skb)->saddr;
	if (saddr)
		fl6.saddr = *saddr;
	fl6.flowi6_oif = skb->dev->ifindex;
	fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
	fl6.flowi6_mark = mark;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	sk = icmpv6_xmit_lock(net);
	if (!sk)
		return;
	sk->sk_mark = mark;
	np = inet6_sk(sk);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	err = ip6_dst_lookup(net, sk, &dst, &fl6);
	if (err)
		goto out;
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
	if (IS_ERR(dst))
		goto out;

	idev = __in6_dev_get(skb->dev);

	msg.skb = skb;
	msg.offset = 0;
	msg.type = ICMPV6_ECHO_REPLY;

	ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
	ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
	ipc6.dontfrag = np->dontfrag;
	ipc6.opt = NULL;

	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
			      sizeof(struct icmp6hdr), &ipc6, &fl6,
			      (struct rt6_info *)dst, MSG_DONTWAIT,
			      &sockc_unused);

	if (err) {
		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
		ip6_flush_pending_frames(sk);
	} else {
		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
						 skb->len + sizeof(struct icmp6hdr));
	}
	dst_release(dst);
out:
	icmpv6_xmit_unlock(sk);
}

/*
 * Deliver a received ICMPv6 error to the upper-layer protocol named by
 * the quoted packet's next header, and to matching raw sockets.
 */
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
	const struct inet6_protocol *ipprot;
	int inner_offset;
	__be16 frag_off;
	u8 nexthdr;
	struct net *net = dev_net(skb->dev);

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		goto out;

	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
	if (ipv6_ext_hdr(nexthdr)) {
		/* now skip over extension headers */
		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
						&nexthdr, &frag_off);
		if (inner_offset < 0)
			goto out;
	} else {
		inner_offset = sizeof(struct ipv6hdr);
	}

	/* Checkin header including 8 bytes of inner protocol header. */
	if (!pskb_may_pull(skb, inner_offset+8))
		goto out;

	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
	   Without this we will not able f.e. to make source routed
	   pmtu discovery.
	   Corresponding argument (opt) to notifiers is already added.
	   --ANK (980726)
	 */

	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);

	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
	return;

out:
	__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
}

/*
 * Handle icmp messages
 */

static int icmpv6_rcv(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct inet6_dev *idev = __in6_dev_get(dev);
	const struct in6_addr *saddr, *daddr;
	struct icmp6hdr *hdr;
	u8 type;
	bool success = false;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		struct sec_path *sp = skb_sec_path(skb);
		int nh;

		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
				 XFRM_STATE_ICMP))
			goto drop_no_count;

		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
			goto drop_no_count;

		nh = skb_network_offset(skb);
		skb_set_network_header(skb, sizeof(*hdr));

		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
			goto drop_no_count;

		skb_set_network_header(skb, nh);
	}

	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;

	if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) {
		net_dbg_ratelimited("ICMPv6 checksum failed [%pI6c > %pI6c]\n",
				    saddr, daddr);
		goto csum_error;
	}

	if (!pskb_pull(skb, sizeof(*hdr)))
		goto
discard_it; hdr = icmp6_hdr(skb); type = hdr->icmp6_type; ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type); switch (type) { case ICMPV6_ECHO_REQUEST: icmpv6_echo_reply(skb); break; case ICMPV6_ECHO_REPLY: success = ping_rcv(skb); break; case ICMPV6_PKT_TOOBIG: /* BUGGG_FUTURE: if packet contains rthdr, we cannot update standard destination cache. Seems, only "advanced" destination cache will allow to solve this problem --ANK (980726) */ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) goto discard_it; hdr = icmp6_hdr(skb); /* * Drop through to notify */ case ICMPV6_DEST_UNREACH: case ICMPV6_TIME_EXCEED: case ICMPV6_PARAMPROB: icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); break; case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: case NDISC_NEIGHBOUR_SOLICITATION: case NDISC_NEIGHBOUR_ADVERTISEMENT: case NDISC_REDIRECT: ndisc_rcv(skb); break; case ICMPV6_MGM_QUERY: igmp6_event_query(skb); break; case ICMPV6_MGM_REPORT: igmp6_event_report(skb); break; case ICMPV6_MGM_REDUCTION: case ICMPV6_NI_QUERY: case ICMPV6_NI_REPLY: case ICMPV6_MLD2_REPORT: case ICMPV6_DHAAD_REQUEST: case ICMPV6_DHAAD_REPLY: case ICMPV6_MOBILE_PREFIX_SOL: case ICMPV6_MOBILE_PREFIX_ADV: break; default: /* informational */ if (type & ICMPV6_INFOMSG_MASK) break; net_dbg_ratelimited("icmpv6: msg of unknown type [%pI6c > %pI6c]\n", saddr, daddr); /* * error of unknown type. 
* must pass to upper level */ icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); } /* until the v6 path can be better sorted assume failure and * preserve the status quo behaviour for the rest of the paths to here */ if (success) consume_skb(skb); else kfree_skb(skb); return 0; csum_error: __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS); discard_it: __ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS); drop_no_count: kfree_skb(skb); return 0; } void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, u8 type, const struct in6_addr *saddr, const struct in6_addr *daddr, int oif) { memset(fl6, 0, sizeof(*fl6)); fl6->saddr = *saddr; fl6->daddr = *daddr; fl6->flowi6_proto = IPPROTO_ICMPV6; fl6->fl6_icmp_type = type; fl6->fl6_icmp_code = 0; fl6->flowi6_oif = oif; security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); } static int __net_init icmpv6_sk_init(struct net *net) { struct sock *sk; int err, i, j; net->ipv6.icmp_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); if (!net->ipv6.icmp_sk) return -ENOMEM; for_each_possible_cpu(i) { err = inet_ctl_sock_create(&sk, PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, net); if (err < 0) { pr_err("Failed to initialize the ICMP6 control socket (err %d)\n", err); goto fail; } net->ipv6.icmp_sk[i] = sk; /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. 
*/ sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024); } return 0; fail: for (j = 0; j < i; j++) inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]); kfree(net->ipv6.icmp_sk); return err; } static void __net_exit icmpv6_sk_exit(struct net *net) { int i; for_each_possible_cpu(i) { inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]); } kfree(net->ipv6.icmp_sk); } static struct pernet_operations icmpv6_sk_ops = { .init = icmpv6_sk_init, .exit = icmpv6_sk_exit, }; int __init icmpv6_init(void) { int err; err = register_pernet_subsys(&icmpv6_sk_ops); if (err < 0) return err; err = -EAGAIN; if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0) goto fail; err = inet6_register_icmp_sender(icmp6_send); if (err) goto sender_reg_err; return 0; sender_reg_err: inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); fail: pr_err("Failed to register ICMP6 protocol\n"); unregister_pernet_subsys(&icmpv6_sk_ops); return err; } void icmpv6_cleanup(void) { inet6_unregister_icmp_sender(icmp6_send); unregister_pernet_subsys(&icmpv6_sk_ops); inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); } static const struct icmp6_err { int err; int fatal; } tab_unreach[] = { { /* NOROUTE */ .err = ENETUNREACH, .fatal = 0, }, { /* ADM_PROHIBITED */ .err = EACCES, .fatal = 1, }, { /* Was NOT_NEIGHBOUR, now reserved */ .err = EHOSTUNREACH, .fatal = 0, }, { /* ADDR_UNREACH */ .err = EHOSTUNREACH, .fatal = 0, }, { /* PORT_UNREACH */ .err = ECONNREFUSED, .fatal = 1, }, { /* POLICY_FAIL */ .err = EACCES, .fatal = 1, }, { /* REJECT_ROUTE */ .err = EACCES, .fatal = 1, }, }; int icmpv6_err_convert(u8 type, u8 code, int *err) { int fatal = 0; *err = EPROTO; switch (type) { case ICMPV6_DEST_UNREACH: fatal = 1; if (code < ARRAY_SIZE(tab_unreach)) { *err = tab_unreach[code].err; fatal = tab_unreach[code].fatal; } break; case ICMPV6_PKT_TOOBIG: *err = EMSGSIZE; break; case ICMPV6_PARAMPROB: *err = EPROTO; fatal = 1; break; case ICMPV6_TIME_EXCEED: *err = EHOSTUNREACH; break; } return fatal; } EXPORT_SYMBOL(icmpv6_err_convert); 
#ifdef CONFIG_SYSCTL
/* Template for the per-netns ICMPv6 sysctl table; entry 0 is the
 * "ratelimit" knob, the trailing empty entry terminates the table.
 */
static struct ctl_table ipv6_icmp_table_template[] = {
	{
		.procname	= "ratelimit",
		.data		= &init_net.ipv6.sysctl.icmpv6_time,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{ },
};

/* Duplicate the template for netns @net, rebinding the "ratelimit"
 * entry's data pointer to this netns' icmpv6_time.  Returns the new
 * table, or NULL on allocation failure (caller handles NULL).
 */
struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = kmemdup(ipv6_icmp_table_template,
		      sizeof(ipv6_icmp_table_template),
		      GFP_KERNEL);
	if (!tbl)
		return NULL;

	tbl[0].data = &net->ipv6.sysctl.icmpv6_time;
	return tbl;
}
#endif
./CrossVul/dataset_final_sorted/CWE-20/c/good_5511_0
crossvul-cpp_data_good_1363_0
/* A Bison parser, made by GNU Bison 3.2.4. */ /* Bison implementation for Yacc-like parsers in C Copyright (C) 1984, 1989-1990, 2000-2015, 2018 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice, so long as that work isn't itself a parser generator using the skeleton or a modified version thereof as a parser skeleton. Alternatively, if you modify or redistribute the parser skeleton itself, you may (at your option) remove this special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. There are some unavoidable exceptions within include files to define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. 
*/ /* Undocumented macros, especially those whose name start with YY_, are private implementation details. Do not rely on them. */ /* Identify Bison output. */ #define YYBISON 1 /* Bison version. */ #define YYBISON_VERSION "3.2.4" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" /* Pure parsers. */ #define YYPURE 2 /* Push parsers. */ #define YYPUSH 0 /* Pull parsers. */ #define YYPULL 1 /* First part of user prologue. */ #include <stdio.h> #include <stdarg.h> #include <string.h> #include <stdlib.h> #include "libyang.h" #include "common.h" #include "context.h" #include "resolve.h" #include "parser_yang.h" #include "parser_yang_lex.h" #include "parser.h" #define YANG_ADDELEM(current_ptr, size, array_name) \ if ((size) == LY_ARRAY_MAX(size)) { \ LOGERR(trg->ctx, LY_EINT, "Reached limit (%"PRIu64") for storing %s.", LY_ARRAY_MAX(size), array_name); \ free(s); \ YYABORT; \ } else if (!((size) % LY_YANG_ARRAY_SIZE)) { \ void *tmp; \ \ tmp = realloc((current_ptr), (sizeof *(current_ptr)) * ((size) + LY_YANG_ARRAY_SIZE)); \ if (!tmp) { \ LOGMEM(trg->ctx); \ free(s); \ YYABORT; \ } \ memset(tmp + (sizeof *(current_ptr)) * (size), 0, (sizeof *(current_ptr)) * LY_YANG_ARRAY_SIZE); \ (current_ptr) = tmp; \ } \ actual = &(current_ptr)[(size)++]; \ void yyerror(YYLTYPE *yylloc, void *scanner, struct yang_parameter *param, ...); /* pointer on the current parsed element 'actual' */ # ifndef YY_NULLPTR # if defined __cplusplus # if 201103L <= __cplusplus # define YY_NULLPTR nullptr # else # define YY_NULLPTR 0 # endif # else # define YY_NULLPTR ((void*)0) # endif # endif /* Enabling verbose error messages. */ #ifdef YYERROR_VERBOSE # undef YYERROR_VERBOSE # define YYERROR_VERBOSE 1 #else # define YYERROR_VERBOSE 0 #endif /* In a future release of Bison, this section will be replaced by #include "parser_yang_bis.h". */ #ifndef YY_YY_PARSER_YANG_BIS_H_INCLUDED # define YY_YY_PARSER_YANG_BIS_H_INCLUDED /* Debug traces. 
*/ #ifndef YYDEBUG # define YYDEBUG 0 #endif #if YYDEBUG extern int yydebug; #endif /* Token type. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE enum yytokentype { UNION_KEYWORD = 258, ANYXML_KEYWORD = 259, WHITESPACE = 260, ERROR = 261, EOL = 262, STRING = 263, STRINGS = 264, IDENTIFIER = 265, IDENTIFIERPREFIX = 266, REVISION_DATE = 267, TAB = 268, DOUBLEDOT = 269, URI = 270, INTEGER = 271, NON_NEGATIVE_INTEGER = 272, ZERO = 273, DECIMAL = 274, ARGUMENT_KEYWORD = 275, AUGMENT_KEYWORD = 276, BASE_KEYWORD = 277, BELONGS_TO_KEYWORD = 278, BIT_KEYWORD = 279, CASE_KEYWORD = 280, CHOICE_KEYWORD = 281, CONFIG_KEYWORD = 282, CONTACT_KEYWORD = 283, CONTAINER_KEYWORD = 284, DEFAULT_KEYWORD = 285, DESCRIPTION_KEYWORD = 286, ENUM_KEYWORD = 287, ERROR_APP_TAG_KEYWORD = 288, ERROR_MESSAGE_KEYWORD = 289, EXTENSION_KEYWORD = 290, DEVIATION_KEYWORD = 291, DEVIATE_KEYWORD = 292, FEATURE_KEYWORD = 293, FRACTION_DIGITS_KEYWORD = 294, GROUPING_KEYWORD = 295, IDENTITY_KEYWORD = 296, IF_FEATURE_KEYWORD = 297, IMPORT_KEYWORD = 298, INCLUDE_KEYWORD = 299, INPUT_KEYWORD = 300, KEY_KEYWORD = 301, LEAF_KEYWORD = 302, LEAF_LIST_KEYWORD = 303, LENGTH_KEYWORD = 304, LIST_KEYWORD = 305, MANDATORY_KEYWORD = 306, MAX_ELEMENTS_KEYWORD = 307, MIN_ELEMENTS_KEYWORD = 308, MODULE_KEYWORD = 309, MUST_KEYWORD = 310, NAMESPACE_KEYWORD = 311, NOTIFICATION_KEYWORD = 312, ORDERED_BY_KEYWORD = 313, ORGANIZATION_KEYWORD = 314, OUTPUT_KEYWORD = 315, PATH_KEYWORD = 316, PATTERN_KEYWORD = 317, POSITION_KEYWORD = 318, PREFIX_KEYWORD = 319, PRESENCE_KEYWORD = 320, RANGE_KEYWORD = 321, REFERENCE_KEYWORD = 322, REFINE_KEYWORD = 323, REQUIRE_INSTANCE_KEYWORD = 324, REVISION_KEYWORD = 325, REVISION_DATE_KEYWORD = 326, RPC_KEYWORD = 327, STATUS_KEYWORD = 328, SUBMODULE_KEYWORD = 329, TYPE_KEYWORD = 330, TYPEDEF_KEYWORD = 331, UNIQUE_KEYWORD = 332, UNITS_KEYWORD = 333, USES_KEYWORD = 334, VALUE_KEYWORD = 335, WHEN_KEYWORD = 336, YANG_VERSION_KEYWORD = 337, YIN_ELEMENT_KEYWORD = 338, ADD_KEYWORD = 339, CURRENT_KEYWORD = 
340, DELETE_KEYWORD = 341, DEPRECATED_KEYWORD = 342, FALSE_KEYWORD = 343, NOT_SUPPORTED_KEYWORD = 344, OBSOLETE_KEYWORD = 345, REPLACE_KEYWORD = 346, SYSTEM_KEYWORD = 347, TRUE_KEYWORD = 348, UNBOUNDED_KEYWORD = 349, USER_KEYWORD = 350, ACTION_KEYWORD = 351, MODIFIER_KEYWORD = 352, ANYDATA_KEYWORD = 353, NODE = 354, NODE_PRINT = 355, EXTENSION_INSTANCE = 356, SUBMODULE_EXT_KEYWORD = 357 }; #endif /* Value type. */ #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED union YYSTYPE { int32_t i; uint32_t uint; char *str; char **p_str; void *v; char ch; struct yang_type *type; struct lys_deviation *dev; struct lys_deviate *deviate; union { uint32_t index; struct lys_node_container *container; struct lys_node_anydata *anydata; struct type_node node; struct lys_node_case *cs; struct lys_node_grp *grouping; struct lys_refine *refine; struct lys_node_notif *notif; struct lys_node_uses *uses; struct lys_node_inout *inout; struct lys_node_augment *augment; } nodes; enum yytokentype token; struct { void *actual; enum yytokentype token; } backup_token; struct { struct lys_revision **revision; int index; } revisions; }; typedef union YYSTYPE YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 # define YYSTYPE_IS_DECLARED 1 #endif /* Location type. */ #if ! defined YYLTYPE && ! 
defined YYLTYPE_IS_DECLARED typedef struct YYLTYPE YYLTYPE; struct YYLTYPE { int first_line; int first_column; int last_line; int last_column; }; # define YYLTYPE_IS_DECLARED 1 # define YYLTYPE_IS_TRIVIAL 1 #endif int yyparse (void *scanner, struct yang_parameter *param); #endif /* !YY_YY_PARSER_YANG_BIS_H_INCLUDED */ #ifdef short # undef short #endif #ifdef YYTYPE_UINT8 typedef YYTYPE_UINT8 yytype_uint8; #else typedef unsigned char yytype_uint8; #endif #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; #else typedef signed char yytype_int8; #endif #ifdef YYTYPE_UINT16 typedef YYTYPE_UINT16 yytype_uint16; #else typedef unsigned short yytype_uint16; #endif #ifdef YYTYPE_INT16 typedef YYTYPE_INT16 yytype_int16; #else typedef short yytype_int16; #endif #ifndef YYSIZE_T # ifdef __SIZE_TYPE__ # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t # elif ! defined YYSIZE_T # include <stddef.h> /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else # define YYSIZE_T unsigned # endif #endif #define YYSIZE_MAXIMUM ((YYSIZE_T) -1) #ifndef YY_ # if defined YYENABLE_NLS && YYENABLE_NLS # if ENABLE_NLS # include <libintl.h> /* INFRINGES ON USER NAME SPACE */ # define YY_(Msgid) dgettext ("bison-runtime", Msgid) # endif # endif # ifndef YY_ # define YY_(Msgid) Msgid # endif #endif #ifndef YY_ATTRIBUTE # if (defined __GNUC__ \ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C # define YY_ATTRIBUTE(Spec) __attribute__(Spec) # else # define YY_ATTRIBUTE(Spec) /* empty */ # endif #endif #ifndef YY_ATTRIBUTE_PURE # define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) #endif #ifndef YY_ATTRIBUTE_UNUSED # define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) #endif /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(E) ((void) (E)) #else # define YYUSE(E) /* empty */ #endif #if defined __GNUC__ && ! 
defined __ICC && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ /* Suppress an incorrect diagnostic about yylval being uninitialized. */ # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") # define YY_IGNORE_MAYBE_UNINITIALIZED_END \ _Pragma ("GCC diagnostic pop") #else # define YY_INITIAL_VALUE(Value) Value #endif #ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_END #endif #ifndef YY_INITIAL_VALUE # define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ # ifdef YYSTACK_USE_ALLOCA # if YYSTACK_USE_ALLOCA # ifdef __GNUC__ # define YYSTACK_ALLOC __builtin_alloca # elif defined __BUILTIN_VA_ARG_INCR # include <alloca.h> /* INFRINGES ON USER NAME SPACE */ # elif defined _AIX # define YYSTACK_ALLOC __alloca # elif defined _MSC_VER # include <malloc.h> /* INFRINGES ON USER NAME SPACE */ # define alloca _alloca # else # define YYSTACK_ALLOC alloca # if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ /* Use EXIT_SUCCESS as a witness for stdlib.h. */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # endif # endif # endif # ifdef YYSTACK_ALLOC /* Pacify GCC's 'empty if-body' warning. */ # define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes. So we cannot safely invoke alloca (N) if N exceeds 4096. Use a slightly smaller number to allow for a few compiler-allocated temporary stack slots. 
*/ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else # define YYSTACK_ALLOC YYMALLOC # define YYSTACK_FREE YYFREE # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif # if (defined __cplusplus && ! defined EXIT_SUCCESS \ && ! ((defined YYMALLOC || defined malloc) \ && (defined YYFREE || defined free))) # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # ifndef YYMALLOC # define YYMALLOC malloc # if ! defined malloc && ! defined EXIT_SUCCESS void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free # if ! defined free && ! defined EXIT_SUCCESS void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ || (defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL \ && defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { yytype_int16 yyss_alloc; YYSTYPE yyvs_alloc; YYLTYPE yyls_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with N elements. */ # define YYSTACK_BYTES(N) \ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE) + sizeof (YYLTYPE)) \ + 2 * YYSTACK_GAP_MAXIMUM) # define YYCOPY_NEEDED 1 /* Relocate STACK from its old location to the new one. The local variables YYSIZE and YYSTACKSIZE give the old and new number of elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. 
*/ # define YYSTACK_RELOCATE(Stack_alloc, Stack) \ do \ { \ YYSIZE_T yynewbytes; \ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ Stack = &yyptr->Stack_alloc; \ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ yyptr += yynewbytes / sizeof (*yyptr); \ } \ while (0) #endif #if defined YYCOPY_NEEDED && YYCOPY_NEEDED /* Copy COUNT objects from SRC to DST. The source and destination do not overlap. */ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(Dst, Src, Count) \ __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src))) # else # define YYCOPY(Dst, Src, Count) \ do \ { \ YYSIZE_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ while (0) # endif # endif #endif /* !YYCOPY_NEEDED */ /* YYFINAL -- State number of the termination state. */ #define YYFINAL 6 /* YYLAST -- Last index in YYTABLE. */ #define YYLAST 3466 /* YYNTOKENS -- Number of terminals. */ #define YYNTOKENS 113 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 329 /* YYNRULES -- Number of rules. */ #define YYNRULES 827 /* YYNSTATES -- Number of states. */ #define YYNSTATES 1318 /* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned by yylex, with out-of-bounds checking. */ #define YYUNDEFTOK 2 #define YYMAXUTOK 357 #define YYTRANSLATE(YYX) \ ((unsigned) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM as returned by yylex, without out-of-bounds checking. 
*/ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 111, 112, 2, 103, 2, 2, 2, 107, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 106, 2, 110, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 108, 2, 109, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 104, 2, 105, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102 }; #if YYDEBUG /* YYRLINE[YYN] -- Source line where rule number YYN was defined. 
*/ static const yytype_uint16 yyrline[] = { 0, 338, 338, 339, 340, 342, 365, 368, 370, 369, 393, 404, 414, 424, 425, 431, 436, 442, 453, 463, 476, 477, 483, 485, 489, 491, 495, 497, 498, 499, 501, 509, 517, 518, 523, 534, 545, 556, 564, 569, 570, 574, 575, 586, 597, 608, 612, 614, 637, 654, 658, 660, 661, 666, 671, 676, 682, 686, 688, 692, 694, 698, 700, 704, 706, 719, 730, 731, 743, 747, 748, 752, 753, 758, 765, 765, 776, 782, 830, 849, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 864, 879, 886, 887, 891, 892, 893, 899, 904, 910, 928, 930, 931, 935, 940, 941, 963, 964, 965, 978, 983, 985, 986, 987, 988, 1003, 1017, 1022, 1023, 1038, 1039, 1040, 1046, 1051, 1057, 1114, 1119, 1120, 1122, 1138, 1143, 1144, 1169, 1170, 1184, 1185, 1191, 1196, 1202, 1206, 1208, 1261, 1272, 1275, 1278, 1283, 1288, 1294, 1299, 1305, 1310, 1319, 1320, 1324, 1371, 1372, 1374, 1375, 1379, 1385, 1398, 1399, 1400, 1404, 1405, 1407, 1411, 1429, 1434, 1436, 1437, 1453, 1458, 1467, 1468, 1472, 1488, 1493, 1498, 1503, 1509, 1513, 1529, 1544, 1545, 1549, 1550, 1560, 1565, 1570, 1575, 1581, 1585, 1596, 1608, 1609, 1612, 1620, 1631, 1632, 1647, 1648, 1649, 1661, 1667, 1672, 1678, 1683, 1685, 1686, 1701, 1706, 1707, 1712, 1716, 1718, 1723, 1725, 1726, 1727, 1740, 1752, 1753, 1755, 1763, 1775, 1776, 1791, 1792, 1793, 1805, 1811, 1816, 1822, 1827, 1829, 1830, 1846, 1850, 1852, 1856, 1858, 1862, 1864, 1868, 1870, 1880, 1887, 1888, 1892, 1893, 1899, 1904, 1909, 1910, 1911, 1912, 1913, 1919, 1920, 1921, 1922, 1923, 1924, 1925, 1926, 1929, 1939, 1946, 1947, 1970, 1971, 1972, 1973, 1974, 1979, 1985, 1991, 1996, 2001, 2002, 2003, 2008, 2009, 2011, 2051, 2061, 2064, 2065, 2066, 2069, 2074, 2075, 2080, 2086, 2092, 2098, 2103, 2109, 2119, 2174, 2177, 2178, 2179, 2182, 2193, 2198, 2199, 2205, 2218, 2231, 2241, 2247, 2252, 2258, 2268, 2315, 2318, 2319, 2320, 2321, 2330, 2336, 2342, 2355, 2368, 2378, 2384, 2389, 2394, 2395, 2396, 2397, 2402, 2404, 2414, 2421, 2422, 2442, 2445, 2446, 2447, 2457, 2464, 2471, 
2478, 2484, 2490, 2492, 2493, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2507, 2517, 2524, 2525, 2539, 2540, 2541, 2542, 2548, 2553, 2558, 2561, 2571, 2578, 2588, 2595, 2596, 2619, 2622, 2623, 2624, 2625, 2632, 2639, 2646, 2651, 2657, 2667, 2674, 2675, 2707, 2708, 2709, 2710, 2716, 2721, 2726, 2727, 2729, 2730, 2732, 2745, 2750, 2751, 2783, 2786, 2800, 2816, 2838, 2889, 2908, 2927, 2948, 2969, 2974, 2980, 2981, 2984, 2999, 3008, 3009, 3011, 3022, 3031, 3032, 3033, 3034, 3040, 3045, 3050, 3051, 3052, 3057, 3059, 3074, 3081, 3091, 3098, 3099, 3123, 3126, 3127, 3133, 3138, 3143, 3144, 3145, 3152, 3160, 3175, 3205, 3206, 3207, 3208, 3209, 3211, 3226, 3256, 3265, 3272, 3273, 3305, 3306, 3307, 3308, 3314, 3319, 3324, 3325, 3326, 3328, 3340, 3360, 3361, 3367, 3373, 3375, 3376, 3378, 3379, 3382, 3390, 3395, 3396, 3398, 3399, 3400, 3402, 3410, 3415, 3416, 3448, 3449, 3455, 3456, 3462, 3468, 3475, 3482, 3490, 3499, 3507, 3512, 3513, 3545, 3546, 3552, 3553, 3559, 3566, 3574, 3579, 3580, 3594, 3595, 3596, 3602, 3608, 3615, 3622, 3630, 3639, 3648, 3653, 3654, 3658, 3659, 3664, 3670, 3675, 3677, 3678, 3679, 3692, 3697, 3699, 3700, 3701, 3714, 3718, 3720, 3725, 3727, 3728, 3748, 3753, 3755, 3756, 3757, 3777, 3782, 3784, 3785, 3786, 3798, 3867, 3872, 3873, 3877, 3881, 3883, 3884, 3886, 3890, 3892, 3892, 3899, 3902, 3911, 3930, 3932, 3933, 3936, 3936, 3953, 3953, 3960, 3960, 3967, 3970, 3972, 3974, 3975, 3977, 3979, 3981, 3982, 3984, 3986, 3987, 3989, 3990, 3992, 3994, 3997, 4000, 4002, 4003, 4005, 4006, 4008, 4010, 4021, 4022, 4025, 4026, 4038, 4039, 4041, 4042, 4044, 4045, 4051, 4052, 4055, 4056, 4057, 4081, 4082, 4085, 4091, 4095, 4100, 4101, 4102, 4105, 4110, 4120, 4122, 4123, 4125, 4126, 4128, 4129, 4130, 4132, 4133, 4135, 4136, 4138, 4139, 4143, 4144, 4171, 4209, 4210, 4212, 4214, 4216, 4217, 4219, 4220, 4222, 4223, 4226, 4227, 4230, 4232, 4233, 4236, 4236, 4243, 4245, 4246, 4247, 4248, 4249, 4250, 4251, 4253, 4254, 4255, 4257, 4258, 4259, 4260, 4261, 4262, 4263, 4264, 4265, 
4266, 4269, 4270, 4271, 4272, 4273, 4274, 4275, 4276, 4277, 4278, 4279, 4280, 4281, 4282, 4283, 4284, 4285, 4286, 4287, 4288, 4289, 4290, 4291, 4292, 4293, 4294, 4295, 4296, 4297, 4298, 4299, 4300, 4301, 4302, 4303, 4304, 4305, 4306, 4307, 4308, 4309, 4310, 4311, 4312, 4313, 4314, 4315, 4316, 4317, 4318, 4319, 4320, 4321, 4322, 4323, 4324, 4325, 4326, 4327, 4328, 4329, 4330, 4331, 4332, 4333, 4334, 4335, 4336, 4337, 4338, 4340, 4347, 4354, 4374, 4392, 4408, 4435, 4442, 4460, 4500, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4531, 4532, 4533, 4534, 4536, 4544, 4545, 4550, 4555, 4560, 4565, 4570, 4575, 4580, 4585, 4590, 4595, 4600, 4605, 4610, 4615, 4620, 4634, 4654, 4659, 4664, 4669, 4682, 4687, 4691, 4701, 4716, 4731, 4746, 4761, 4781, 4796, 4797, 4803, 4810, 4825, 4828 }; #endif #if YYDEBUG || YYERROR_VERBOSE || 0 /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/ static const char *const yytname[] = { "$end", "error", "$undefined", "UNION_KEYWORD", "ANYXML_KEYWORD", "WHITESPACE", "ERROR", "EOL", "STRING", "STRINGS", "IDENTIFIER", "IDENTIFIERPREFIX", "REVISION_DATE", "TAB", "DOUBLEDOT", "URI", "INTEGER", "NON_NEGATIVE_INTEGER", "ZERO", "DECIMAL", "ARGUMENT_KEYWORD", "AUGMENT_KEYWORD", "BASE_KEYWORD", "BELONGS_TO_KEYWORD", "BIT_KEYWORD", "CASE_KEYWORD", "CHOICE_KEYWORD", "CONFIG_KEYWORD", "CONTACT_KEYWORD", "CONTAINER_KEYWORD", "DEFAULT_KEYWORD", "DESCRIPTION_KEYWORD", "ENUM_KEYWORD", "ERROR_APP_TAG_KEYWORD", "ERROR_MESSAGE_KEYWORD", "EXTENSION_KEYWORD", "DEVIATION_KEYWORD", "DEVIATE_KEYWORD", "FEATURE_KEYWORD", "FRACTION_DIGITS_KEYWORD", "GROUPING_KEYWORD", "IDENTITY_KEYWORD", "IF_FEATURE_KEYWORD", "IMPORT_KEYWORD", "INCLUDE_KEYWORD", "INPUT_KEYWORD", "KEY_KEYWORD", "LEAF_KEYWORD", "LEAF_LIST_KEYWORD", "LENGTH_KEYWORD", "LIST_KEYWORD", "MANDATORY_KEYWORD", "MAX_ELEMENTS_KEYWORD", "MIN_ELEMENTS_KEYWORD", "MODULE_KEYWORD", "MUST_KEYWORD", "NAMESPACE_KEYWORD", "NOTIFICATION_KEYWORD", "ORDERED_BY_KEYWORD", "ORGANIZATION_KEYWORD", "OUTPUT_KEYWORD", "PATH_KEYWORD", "PATTERN_KEYWORD", "POSITION_KEYWORD", "PREFIX_KEYWORD", "PRESENCE_KEYWORD", "RANGE_KEYWORD", "REFERENCE_KEYWORD", "REFINE_KEYWORD", "REQUIRE_INSTANCE_KEYWORD", "REVISION_KEYWORD", "REVISION_DATE_KEYWORD", "RPC_KEYWORD", "STATUS_KEYWORD", "SUBMODULE_KEYWORD", "TYPE_KEYWORD", "TYPEDEF_KEYWORD", "UNIQUE_KEYWORD", "UNITS_KEYWORD", "USES_KEYWORD", "VALUE_KEYWORD", "WHEN_KEYWORD", "YANG_VERSION_KEYWORD", "YIN_ELEMENT_KEYWORD", "ADD_KEYWORD", "CURRENT_KEYWORD", "DELETE_KEYWORD", "DEPRECATED_KEYWORD", "FALSE_KEYWORD", "NOT_SUPPORTED_KEYWORD", "OBSOLETE_KEYWORD", "REPLACE_KEYWORD", "SYSTEM_KEYWORD", "TRUE_KEYWORD", "UNBOUNDED_KEYWORD", "USER_KEYWORD", "ACTION_KEYWORD", "MODIFIER_KEYWORD", "ANYDATA_KEYWORD", "NODE", "NODE_PRINT", "EXTENSION_INSTANCE", "SUBMODULE_EXT_KEYWORD", "'+'", "'{'", "'}'", "';'", "'/'", "'['", "']'", "'='", "'('", "')'", "$accept", "start", 
"tmp_string", "string_1", "string_2", "$@1", "module_arg_str", "module_stmt", "module_header_stmts", "module_header_stmt", "submodule_arg_str", "submodule_stmt", "submodule_header_stmts", "submodule_header_stmt", "yang_version_arg", "yang_version_stmt", "namespace_arg_str", "namespace_stmt", "linkage_stmts", "import_stmt", "import_arg_str", "import_opt_stmt", "include_arg_str", "include_stmt", "include_end", "include_opt_stmt", "revision_date_arg", "revision_date_stmt", "belongs_to_arg_str", "belongs_to_stmt", "prefix_arg", "prefix_stmt", "meta_stmts", "organization_arg", "organization_stmt", "contact_arg", "contact_stmt", "description_arg", "description_stmt", "reference_arg", "reference_stmt", "revision_stmts", "revision_arg_stmt", "revision_stmts_opt", "revision_stmt", "revision_end", "revision_opt_stmt", "date_arg_str", "$@2", "body_stmts_end", "body_stmts", "body_stmt", "extension_arg_str", "extension_stmt", "extension_end", "extension_opt_stmt", "argument_str", "argument_stmt", "argument_end", "yin_element_arg", "yin_element_stmt", "yin_element_arg_str", "status_arg", "status_stmt", "status_arg_str", "feature_arg_str", "feature_stmt", "feature_end", "feature_opt_stmt", "if_feature_arg", "if_feature_stmt", "if_feature_end", "identity_arg_str", "identity_stmt", "identity_end", "identity_opt_stmt", "base_arg", "base_stmt", "typedef_arg_str", "typedef_stmt", "type_opt_stmt", "type_stmt", "type_arg_str", "type_end", "type_body_stmts", "some_restrictions", "union_stmt", "union_spec", "fraction_digits_arg", "fraction_digits_stmt", "fraction_digits_arg_str", "length_stmt", "length_arg_str", "length_end", "message_opt_stmt", "pattern_sep", "pattern_stmt", "pattern_arg_str", "pattern_end", "pattern_opt_stmt", "modifier_arg", "modifier_stmt", "enum_specification", "enum_stmts", "enum_stmt", "enum_arg_str", "enum_end", "enum_opt_stmt", "value_arg", "value_stmt", "integer_value_arg_str", "range_stmt", "range_end", "path_arg", "path_stmt", "require_instance_arg", 
"require_instance_stmt", "require_instance_arg_str", "bits_specification", "bit_stmts", "bit_stmt", "bit_arg_str", "bit_end", "bit_opt_stmt", "position_value_arg", "position_stmt", "position_value_arg_str", "error_message_arg", "error_message_stmt", "error_app_tag_arg", "error_app_tag_stmt", "units_arg", "units_stmt", "default_arg", "default_stmt", "grouping_arg_str", "grouping_stmt", "grouping_end", "grouping_opt_stmt", "data_def_stmt", "container_arg_str", "container_stmt", "container_end", "container_opt_stmt", "leaf_stmt", "leaf_arg_str", "leaf_opt_stmt", "leaf_list_arg_str", "leaf_list_stmt", "leaf_list_opt_stmt", "list_arg_str", "list_stmt", "list_opt_stmt", "choice_arg_str", "choice_stmt", "choice_end", "choice_opt_stmt", "short_case_case_stmt", "short_case_stmt", "case_arg_str", "case_stmt", "case_end", "case_opt_stmt", "anyxml_arg_str", "anyxml_stmt", "anydata_arg_str", "anydata_stmt", "anyxml_end", "anyxml_opt_stmt", "uses_arg_str", "uses_stmt", "uses_end", "uses_opt_stmt", "refine_args_str", "refine_arg_str", "refine_stmt", "refine_end", "refine_body_opt_stmts", "uses_augment_arg_str", "uses_augment_arg", "uses_augment_stmt", "augment_arg_str", "augment_arg", "augment_stmt", "augment_opt_stmt", "action_arg_str", "action_stmt", "rpc_arg_str", "rpc_stmt", "rpc_end", "rpc_opt_stmt", "input_arg", "input_stmt", "input_output_opt_stmt", "output_arg", "output_stmt", "notification_arg_str", "notification_stmt", "notification_end", "notification_opt_stmt", "deviation_arg", "deviation_stmt", "deviation_opt_stmt", "deviation_arg_str", "deviate_body_stmt", "deviate_not_supported", "deviate_not_supported_stmt", "deviate_not_supported_end", "deviate_stmts", "deviate_add", "deviate_add_stmt", "deviate_add_end", "deviate_add_opt_stmt", "deviate_delete", "deviate_delete_stmt", "deviate_delete_end", "deviate_delete_opt_stmt", "deviate_replace", "deviate_replace_stmt", "deviate_replace_end", "deviate_replace_opt_stmt", "when_arg_str", "when_stmt", "when_end", 
"when_opt_stmt", "config_arg", "config_stmt", "config_arg_str", "mandatory_arg", "mandatory_stmt", "mandatory_arg_str", "presence_arg", "presence_stmt", "min_value_arg", "min_elements_stmt", "min_value_arg_str", "max_value_arg", "max_elements_stmt", "max_value_arg_str", "ordered_by_arg", "ordered_by_stmt", "ordered_by_arg_str", "must_agr_str", "must_stmt", "must_end", "unique_arg", "unique_stmt", "unique_arg_str", "key_arg", "key_stmt", "key_arg_str", "$@3", "range_arg_str", "absolute_schema_nodeid", "absolute_schema_nodeids", "absolute_schema_nodeid_opt", "descendant_schema_nodeid", "$@4", "path_arg_str", "$@5", "$@6", "absolute_path", "absolute_paths", "absolute_path_opt", "relative_path", "relative_path_part1", "relative_path_part1_opt", "descendant_path", "descendant_path_opt", "path_predicate", "path_equality_expr", "path_key_expr", "rel_path_keyexpr", "rel_path_keyexpr_part1", "rel_path_keyexpr_part1_opt", "rel_path_keyexpr_part2", "current_function_invocation", "positive_integer_value", "non_negative_integer_value", "integer_value", "integer_value_convert", "prefix_arg_str", "identifier_arg_str", "node_identifier", "identifier_ref_arg_str", "stmtend", "semicolom", "curly_bracket_close", "curly_bracket_open", "stmtsep", "unknown_statement", "string_opt", "string_opt_part1", "string_opt_part2", "unknown_string", "unknown_string_part1", "unknown_string_part2", "unknown_statement_end", "unknown_statement2_opt", "unknown_statement2", "unknown_statement2_end", "unknown_statement2_yang_stmt", "unknown_statement2_module_stmt", "unknown_statement3_opt", "unknown_statement3_opt_end", "sep_stmt", "optsep", "sep", "whitespace_opt", "string", "$@7", "strings", "identifier", "identifier1", "yang_stmt", "identifiers", "identifiers_ref", "type_ext_alloc", "typedef_ext_alloc", "iffeature_ext_alloc", "restriction_ext_alloc", "when_ext_alloc", "revision_ext_alloc", "datadef_ext_check", "not_supported_ext_check", "not_supported_ext", "datadef_ext_stmt", "restriction_ext_stmt", 
"ext_substatements", YY_NULLPTR }; #endif # ifdef YYPRINT /* YYTOKNUM[NUM] -- (External) token number corresponding to the (internal) symbol number NUM (which must be that of a token). */ static const yytype_uint16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 43, 123, 125, 59, 47, 91, 93, 61, 40, 41 }; # endif #define YYPACT_NINF -1012 #define yypact_value_is_default(Yystate) \ (!!((Yystate) == (-1012))) #define YYTABLE_NINF -757 #define yytable_value_is_error(Yytable_value) \ 0 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. 
*/ static const yytype_int16 yypact[] = { 440, 100, -1012, -1012, 566, 1894, -1012, -1012, -1012, 266, 266, -1012, 266, -1012, 266, 266, -1012, 266, 266, 266, 266, -1012, 266, 266, -1012, -1012, -1012, -1012, 266, -1012, -1012, -1012, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, -1012, -1012, 266, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -8, 31, 79, 868, 70, 125, 481, 266, -1012, -1012, 3273, 3273, 3273, 2893, 3273, 71, 2703, 2703, 2703, 2703, 2703, 98, 2988, 94, 52, 287, 2703, 77, 2703, 104, 287, 3273, 2703, 2703, 182, 58, 246, 2988, 2703, 321, 2703, 279, 279, 266, -1012, 266, -1012, 266, -1012, 266, 266, 266, 266, -1012, -1012, -1012, -1012, -1012, 266, -1012, 266, -1012, 266, 266, 266, 266, 266, -1012, 266, 266, 266, 266, -1012, 266, 266, 266, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 29, -1012, 134, -1012, -1012, -1012, -22, 2798, 266, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 151, -1012, -1012, -1012, -1012, -1012, 156, -1012, 67, -1012, -1012, -1012, 224, -1012, -1012, -1012, 161, -1012, -1012, -1012, -1012, 224, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 224, -1012, -1012, -1012, 224, -1012, 224, -1012, 224, -1012, 224, -1012, -1012, -1012, 224, -1012, -1012, -1012, 
-1012, 224, -1012, -1012, -1012, -1012, -1012, -1012, 224, -1012, -1012, -1012, 224, -1012, -1012, -1012, -1012, 224, -1012, -1012, -1012, 224, -1012, -1012, -1012, -1012, 224, -1012, 224, -1012, -1012, 224, -1012, 262, 202, -1012, 224, -1012, -1012, -1012, 224, -1012, -1012, 224, -1012, 224, -1012, -1012, -1012, -1012, 224, -1012, -1012, -1012, 224, -1012, -1012, -1012, -1012, -1012, 224, -1012, -1012, 224, -1012, -1012, -1012, 224, -1012, -1012, -1012, -1012, -1012, 224, -1012, -1012, -1012, 224, -1012, -1012, -1012, -1012, 2893, 279, 3273, 279, 2703, 279, 2703, 2703, 2703, -1012, 2703, 279, 2703, 279, 58, 279, 3273, 3273, 3273, 3273, 3273, 266, 3273, 3273, 3273, 3273, 266, 2893, 3273, 3273, -1012, -1012, 279, -1012, -1012, -1012, -1012, -1012, -1012, 266, -1012, 266, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 266, 266, -1012, 266, 266, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 266, -1012, -1012, 266, 266, -1012, 266, -1012, 266, -1012, 266, -1012, 266, 266, -1012, -1012, -1012, 3368, -1012, -1012, 291, -1012, -1012, -1012, 266, -1012, 266, -1012, -1012, 266, 266, -1012, -1012, -1012, 266, 266, 266, -1012, -1012, 266, -1012, -1012, -1012, 266, -1012, 228, 2703, 266, 274, -1012, 189, -1012, 288, -1012, 298, -1012, 303, -1012, 370, -1012, 380, -1012, 389, -1012, 393, -1012, 407, -1012, 411, -1012, 419, -1012, 426, -1012, 463, -1012, 238, -1012, 314, -1012, 317, -1012, 505, -1012, 506, -1012, 521, -1012, 407, -1012, 279, 279, 266, 266, 266, 266, 326, 279, 279, 109, 279, 112, 608, 266, 266, -1012, 262, -1012, 3083, 266, 332, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 1964, 
1994, 2191, 347, -1012, -1012, 19, -1012, 54, 266, 355, -1012, -1012, 368, 373, -1012, -1012, -1012, 48, 3368, -1012, 266, 831, 279, 186, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 266, -1012, 127, 515, 266, -1012, -1012, -1012, 515, -1012, -1012, 188, -1012, 279, -1012, 473, -1012, 306, -1012, 2552, 266, 266, 397, 1000, -1012, -1012, -1012, -1012, 783, -1012, 230, 503, 404, 887, 359, 438, 929, 1958, 1645, 817, 947, 344, 2074, 1547, 1768, 235, 852, 279, 279, 279, 279, 266, -22, 280, -1012, 266, 266, -1012, -1012, 375, 2703, 375, 279, -1012, -1012, -1012, 224, -1012, -1012, 3368, -1012, -1012, -1012, -1012, -1012, 266, 266, -1012, 3273, 2703, -1012, -1012, -1012, -8, -1012, -1012, -1012, -1012, -1012, -1012, 279, 474, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 266, 266, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 3273, 3273, 279, 
279, -1012, -1012, -1012, -1012, -1012, 125, 224, -1012, -1012, 266, 266, -1012, 399, 473, 266, 528, 528, 545, -1012, 567, -1012, 279, -1012, 279, 279, 279, 480, -1012, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 2988, 2988, 279, 279, 279, 279, 279, 279, 279, 279, 279, 266, 266, 423, -1012, 570, -1012, 428, 2046, -1012, -1012, 434, -1012, 465, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 479, -1012, -1012, -1012, 571, -1012, -1012, -1012, -1012, -1012, -1012, 266, 266, 266, 266, 266, 266, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 279, -1012, 473, 266, 279, 279, 279, 279, -1012, 266, -1012, -1012, -1012, 266, 279, 279, 266, 51, 3273, 51, 3273, 3273, 3273, 279, 266, 459, 2367, 385, 509, 279, 279, 341, 365, -1012, -1012, 483, -1012, -1012, 574, -1012, -1012, 486, -1012, -1012, 591, -1012, 592, -1012, 521, -1012, 473, -1012, 473, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 885, 1126, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 332, 266, -1012, -1012, -1012, -1012, 266, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 467, 492, 279, 279, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 279, 279, 279, 279, 279, 473, 473, 279, 279, 279, 279, 279, 279, 279, 279, 1460, 205, 524, 216, 201, 493, 579, -1012, -1012, -1012, -1012, -1012, -1012, 266, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 473, -1012, -1012, 279, 293, 279, 279, 497, 3178, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 473, -1012, -1012, 279, 73, 120, 133, 163, -1012, 50, -1012, 
-1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 510, 222, 279, 279, 279, 473, -1012, 824, 346, 985, 3368, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 279, 279, 279 }; /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. Performed when YYTABLE does not specify something else to do. Zero means the default is an error. */ static const yytype_uint16 yydefact[] = { 790, 0, 2, 3, 0, 757, 1, 649, 650, 0, 0, 652, 0, 763, 0, 0, 761, 0, 0, 0, 0, 762, 0, 0, 766, 764, 765, 767, 0, 768, 769, 770, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 759, 760, 0, 771, 802, 806, 619, 792, 803, 797, 793, 794, 619, 809, 796, 815, 814, 819, 804, 813, 818, 799, 800, 795, 798, 810, 811, 805, 816, 817, 812, 820, 801, 0, 0, 0, 0, 0, 0, 0, 627, 758, 651, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 571, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 791, 822, 0, 619, 0, 619, 0, 619, 0, 0, 0, 0, 789, 787, 788, 786, 619, 0, 619, 0, 619, 0, 0, 0, 0, 0, 651, 0, 0, 0, 0, 651, 0, 0, 0, 778, 777, 780, 781, 782, 776, 775, 774, 773, 785, 772, 0, 779, 0, 784, 783, 619, 0, 629, 653, 679, 5, 669, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 666, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 670, 744, 671, 672, 673, 674, 745, 675, 676, 677, 678, 746, 747, 748, 651, 608, 0, 10, 749, 667, 668, 651, 0, 17, 0, 99, 750, 613, 0, 138, 651, 651, 0, 47, 651, 651, 529, 0, 525, 659, 662, 660, 664, 665, 663, 658, 0, 58, 656, 661, 0, 243, 0, 60, 0, 239, 0, 237, 598, 170, 0, 167, 651, 610, 563, 0, 559, 561, 609, 651, 651, 534, 0, 530, 651, 545, 0, 541, 651, 599, 540, 0, 537, 600, 651, 0, 25, 651, 651, 550, 0, 546, 0, 56, 575, 0, 
213, 0, 0, 236, 0, 233, 651, 605, 0, 49, 651, 0, 535, 0, 62, 651, 651, 219, 0, 215, 74, 76, 0, 45, 651, 651, 651, 114, 0, 109, 558, 0, 555, 651, 569, 0, 241, 603, 604, 601, 209, 0, 206, 651, 602, 0, 191, 621, 620, 651, 0, 807, 0, 808, 0, 821, 0, 0, 0, 180, 0, 823, 0, 824, 0, 825, 0, 0, 0, 0, 0, 445, 0, 0, 0, 0, 452, 0, 0, 0, 619, 619, 826, 651, 651, 827, 651, 628, 651, 7, 619, 607, 619, 619, 101, 100, 618, 616, 139, 619, 619, 611, 612, 619, 528, 527, 526, 59, 651, 244, 61, 240, 238, 168, 169, 560, 651, 533, 532, 531, 543, 542, 544, 538, 539, 26, 549, 548, 547, 57, 214, 0, 578, 572, 0, 574, 582, 234, 235, 50, 606, 536, 63, 218, 217, 216, 651, 46, 111, 113, 112, 110, 556, 557, 567, 242, 207, 208, 192, 0, 625, 624, 0, 150, 0, 140, 0, 124, 0, 172, 0, 551, 0, 182, 0, 564, 0, 518, 0, 65, 0, 368, 0, 357, 0, 334, 0, 266, 0, 245, 0, 285, 0, 298, 0, 314, 0, 454, 0, 383, 0, 430, 0, 370, 447, 447, 645, 647, 632, 630, 6, 13, 20, 104, 614, 0, 0, 657, 562, 587, 577, 581, 0, 75, 570, 651, 634, 622, 623, 626, 619, 151, 149, 619, 619, 126, 125, 619, 173, 171, 619, 553, 552, 619, 183, 181, 619, 211, 210, 619, 520, 519, 619, 69, 68, 619, 372, 369, 619, 359, 358, 619, 336, 335, 619, 268, 267, 619, 247, 246, 619, 619, 619, 619, 456, 455, 619, 385, 384, 619, 434, 431, 371, 0, 0, 0, 631, 651, 27, 12, 27, 19, 0, 0, 617, 619, 0, 576, 579, 583, 580, 585, 0, 568, 636, 156, 142, 0, 175, 175, 185, 175, 522, 71, 374, 361, 338, 270, 249, 286, 300, 316, 458, 387, 436, 446, 619, 619, 619, 258, 259, 260, 261, 262, 263, 264, 265, 619, 453, 651, 627, 651, 0, 51, 0, 14, 15, 16, 51, 21, 619, 0, 102, 615, 48, 654, 584, 0, 565, 0, 0, 0, 0, 153, 154, 619, 155, 221, 0, 127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 449, 450, 451, 448, 648, 0, 0, 8, 0, 0, 619, 619, 66, 0, 66, 22, 651, 651, 108, 0, 103, 655, 0, 586, 644, 635, 638, 651, 627, 627, 643, 0, 0, 152, 159, 619, 0, 162, 619, 619, 619, 158, 157, 194, 220, 141, 147, 148, 146, 619, 144, 145, 174, 178, 179, 176, 177, 554, 184, 189, 190, 186, 
187, 188, 212, 521, 523, 524, 70, 72, 73, 373, 381, 382, 380, 619, 619, 378, 379, 619, 360, 365, 366, 364, 619, 619, 619, 337, 345, 346, 344, 619, 341, 350, 351, 352, 353, 356, 619, 348, 349, 354, 355, 619, 342, 343, 269, 277, 278, 276, 619, 619, 619, 619, 619, 619, 619, 275, 274, 619, 248, 251, 252, 250, 619, 619, 619, 619, 619, 284, 296, 297, 295, 619, 619, 290, 292, 619, 293, 294, 619, 299, 312, 313, 311, 619, 619, 305, 304, 619, 307, 308, 309, 310, 619, 315, 327, 328, 326, 619, 619, 619, 619, 619, 619, 619, 322, 323, 324, 325, 619, 321, 320, 457, 462, 463, 461, 619, 619, 619, 619, 619, 0, 0, 386, 391, 392, 390, 619, 619, 619, 619, 435, 439, 440, 438, 619, 619, 619, 619, 619, 646, 651, 651, 0, 0, 28, 29, 52, 53, 54, 55, 78, 64, 0, 23, 78, 107, 106, 105, 0, 654, 637, 0, 0, 0, 224, 0, 197, 164, 165, 160, 161, 163, 193, 222, 143, 376, 375, 377, 363, 367, 362, 340, 347, 339, 272, 282, 279, 283, 280, 281, 271, 273, 254, 253, 255, 256, 257, 288, 289, 287, 291, 302, 303, 301, 306, 318, 329, 330, 333, 331, 332, 317, 319, 460, 464, 465, 466, 459, 0, 0, 389, 393, 394, 388, 437, 441, 442, 443, 444, 633, 9, 0, 31, 0, 37, 0, 77, 619, 24, 0, 588, 0, 651, 641, 639, 640, 619, 225, 619, 619, 198, 196, 619, 413, 414, 0, 651, 396, 397, 0, 651, 619, 619, 39, 38, 651, 0, 0, 0, 0, 0, 0, 619, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 67, 651, 654, 645, 227, 223, 200, 195, 619, 412, 619, 399, 398, 395, 32, 41, 11, 0, 0, 0, 0, 0, 0, 79, 18, 0, 0, 0, 0, 420, 401, 0, 0, 417, 418, 0, 567, 651, 0, 90, 474, 0, 467, 651, 0, 115, 0, 128, 0, 432, 654, 589, 654, 642, 226, 231, 232, 230, 619, 229, 199, 204, 205, 203, 619, 202, 0, 0, 30, 36, 33, 34, 35, 40, 44, 42, 43, 619, 566, 416, 619, 92, 91, 619, 473, 619, 117, 116, 619, 130, 129, 433, 0, 0, 228, 201, 415, 424, 425, 423, 619, 619, 619, 619, 619, 619, 400, 410, 411, 619, 405, 406, 407, 404, 408, 409, 619, 420, 94, 469, 119, 132, 654, 654, 422, 426, 429, 427, 428, 421, 403, 402, 0, 0, 0, 0, 0, 0, 0, 419, 93, 97, 98, 619, 96, 0, 468, 470, 471, 
118, 122, 123, 121, 619, 131, 136, 137, 135, 619, 133, 597, 654, 590, 593, 95, 0, 120, 134, 0, 0, 484, 497, 477, 506, 619, 651, 475, 476, 651, 481, 651, 483, 651, 482, 654, 594, 595, 472, 0, 0, 0, 0, 592, 591, 619, 479, 478, 619, 486, 485, 619, 499, 498, 619, 508, 507, 0, 0, 488, 501, 510, 654, 480, 0, 0, 0, 0, 487, 489, 492, 493, 494, 495, 496, 619, 491, 500, 502, 505, 619, 504, 509, 619, 512, 513, 514, 515, 516, 517, 596, 490, 503, 511 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int16 yypgoto[] = { -1012, -1012, -1012, 245, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -16, -1012, -2, -9, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1011, -1012, 22, -1012, -534, -24, -1012, 658, -1012, 681, -1012, 63, -1012, 105, -41, -1012, -1012, -237, -1012, -1012, 305, -1012, -225, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -486, -1012, -1012, -1012, -1012, -1012, 41, -1012, -1012, -1012, -1012, -1012, -1012, -11, -1012, -1012, -1012, -1012, -1012, -1012, -657, -1012, -3, -1012, -653, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 23, -1012, 30, -1012, -1012, 53, -1012, 35, -1012, -1012, -1012, -1012, 10, -1012, -1012, -236, -1012, -1012, -1012, -1012, -366, -1012, 38, -1012, -1012, 40, -1012, 43, -1012, -1012, -1012, -21, -1012, -1012, -1012, -1012, -355, -1012, -1012, 12, -1012, 18, -1012, -704, -1012, -480, -1012, 16, -1012, -1012, -560, -1012, -80, -1012, -1012, -67, -1012, -1012, -1012, -63, -1012, -1012, -39, -1012, -1012, -36, -1012, -1012, -1012, -1012, -1012, -33, -1012, -1012, -1012, -28, -1012, -27, 206, -1012, -1012, 667, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -435, -1012, -62, -1012, -1012, -365, -1012, -1012, 42, 211, -1012, 44, -1012, -87, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, 74, -1012, -1012, -1012, -473, -1012, -1012, -667, 
-1012, -1012, -675, -1012, -722, -1012, -1012, -662, -1012, -1012, -271, -1012, -1012, -35, -1012, -1012, -725, -1012, -1012, 46, -1012, -1012, -1012, -390, -319, -335, -461, -1012, -1012, -1012, -1012, 214, 97, -1012, -1012, 239, -1012, -1012, -1012, 135, -1012, -1012, -1012, -441, -1012, -1012, -1012, 155, 693, -1012, -1012, -1012, 185, -93, -328, 1164, -1012, -1012, -1012, 526, 110, -1012, -1012, -1012, -433, -1012, -1012, -1012, -1012, -1012, -143, -1012, -1012, -260, 84, -4, 1453, 166, -694, 119, -1012, 660, -12, -1012, 137, -20, -23, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012, -1012 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int16 yydefgoto[] = { -1, 1, 261, 262, 553, 933, 263, 2, 631, 632, 269, 3, 633, 634, 944, 688, 332, 54, 686, 740, 1023, 1106, 1025, 741, 1056, 1107, 365, 55, 279, 56, 351, 57, 742, 339, 938, 293, 939, 299, 783, 356, 784, 942, 521, 943, 144, 597, 718, 366, 489, 1027, 1028, 1064, 1113, 1065, 1157, 1208, 271, 62, 438, 749, 636, 750, 371, 1174, 372, 1119, 1066, 1162, 1210, 509, 1175, 579, 1121, 1067, 1165, 1211, 275, 64, 507, 669, 711, 127, 505, 575, 705, 706, 765, 766, 307, 65, 308, 136, 511, 582, 713, 401, 137, 515, 588, 715, 388, 66, 707, 964, 708, 957, 1043, 1103, 384, 67, 385, 138, 591, 342, 68, 361, 69, 362, 709, 774, 710, 955, 1040, 1102, 347, 70, 348, 303, 785, 301, 786, 378, 73, 297, 74, 531, 670, 612, 723, 671, 529, 672, 609, 722, 673, 533, 724, 535, 674, 725, 537, 675, 726, 527, 676, 606, 721, 828, 829, 525, 1177, 603, 720, 523, 677, 545, 678, 600, 719, 541, 679, 621, 728, 1050, 1051, 919, 1087, 1142, 1046, 1047, 920, 1109, 1110, 1071, 1141, 543, 1178, 1123, 1072, 624, 729, 170, 171, 626, 172, 173, 539, 1179, 618, 727, 1116, 1074, 1209, 1117, 1249, 1250, 1251, 1271, 1252, 1253, 1254, 1274, 1288, 1255, 1256, 1277, 1289, 1257, 1258, 1280, 1290, 519, 1180, 594, 717, 284, 75, 285, 319, 76, 320, 354, 77, 328, 78, 329, 323, 79, 324, 337, 80, 338, 513, 680, 585, 374, 81, 375, 312, 82, 313, 459, 
517, 646, 1112, 567, 376, 497, 343, 344, 345, 475, 476, 563, 478, 479, 565, 643, 699, 640, 950, 1126, 1237, 1238, 1244, 1268, 1127, 330, 331, 386, 387, 352, 264, 377, 276, 441, 442, 638, 443, 124, 390, 502, 503, 571, 176, 430, 629, 570, 702, 757, 1036, 758, 759, 628, 428, 391, 4, 177, 752, 294, 451, 295, 265, 266, 267, 268, 392, 83, 84, 85, 86, 87, 88, 89, 90, 91, 175, 140, 5 }; /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule whose number is the opposite. If YYTABLE_NINF, syntax error. */ static const yytype_int16 yytable[] = { 11, 901, 174, 881, 897, 92, 92, 780, 92, 160, 92, 92, 314, 92, 92, 92, 92, 71, 92, 92, 865, 877, 161, 72, 92, 639, 162, 169, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 63, 848, 92, 764, 163, 139, 808, 164, 835, 751, 165, 869, 779, 180, 180, 166, 167, 882, 898, 506, 180, 126, 60, 305, 363, 864, 876, 278, 131, 36, 277, 15, 7, 180, 8, 129, 426, 41, 427, 180, 92, 296, 296, 296, 296, 296, 542, 315, 353, 1144, 1149, 296, 690, 296, 6, 687, 180, 296, 296, 159, 180, 128, 315, 296, 61, 296, 180, 960, 7, 305, 8, 7, -573, 8, 273, 130, 92, 273, 92, 7, 92, 8, 92, 92, 92, 92, 7, 423, 8, 737, 687, 92, 7, 92, 8, 92, 92, 92, 92, 92, 321, 92, 92, 92, 92, 141, 92, 92, 92, -587, -587, -654, 645, 281, 815, 142, 843, 856, 282, 296, 892, 910, 7, 334, 8, 436, 335, 437, 11, 93, 94, 1269, 95, 1270, 96, 97, 316, 98, 99, 100, 101, 317, 102, 103, 180, 7, 635, 8, 104, 143, 180, 273, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 477, 637, 123, 298, 300, 302, 304, 14, 1272, 12, 1273, 7, 333, 8, 340, 781, 20, 273, 355, 357, 20, 1275, 424, 1276, 379, 822, 389, 130, 866, 878, 807, 20, 834, 847, 735, 868, 880, 896, 180, 433, 912, 1033, 130, 309, 435, 20, 325, 22, 23, 446, 20, 1278, 43, 1279, 358, 7, 43, 8, 46, 359, 746, 130, 46, 270, 272, 747, 280, 43, 7, 7, 8, 8, 932, 46, 273, 712, 393, 576, 395, 180, 397, 43, 399, 400, 402, 403, 
43, 913, 305, 326, 1229, 405, 46, 407, 1215, 409, 410, 411, 412, 413, 141, 415, 416, 417, 418, 1224, 420, 421, 422, 953, 954, 1287, 439, 180, 440, 367, 568, 368, 569, 782, 369, 380, 381, 382, 914, 274, 613, 283, 292, 292, 292, 292, 292, 306, 311, 318, 322, 327, 292, 336, 292, 341, 346, 350, 292, 292, 360, 364, 370, 373, 292, 383, 292, 474, 278, 17, 20, 277, 19, 20, 19, 1245, 573, 1246, 574, 562, 1247, 1100, 1248, 296, 130, 296, 296, 296, 20, 296, 577, 296, 578, 33, 20, 278, 564, 133, 277, 133, 580, 18, 581, 41, 20, 583, 43, 584, 11, 43, 45, 474, 698, 11, 20, 46, 614, 126, 1189, 615, 48, 47, 48, 141, 43, 130, 11, 630, 11, 1167, 43, 1168, 38, 20, 45, 22, 23, 645, 11, 11, 43, 11, 11, -651, 1143, -651, 40, 859, 684, 1301, 43, 11, 883, 899, 11, 11, 46, 11, 695, 11, 315, 11, 795, 11, 11, 1188, 1070, 20, 1148, 43, 644, 697, 586, 1187, 587, 11, 751, 11, 1190, 698, 11, 11, 589, 145, 590, 11, 11, 11, 1129, 296, 11, 592, -651, 593, 11, 595, 703, 596, 11, 52, 763, 1212, 1213, 43, 146, 147, 1032, 788, 148, 598, 704, 599, -651, 601, 510, 602, 512, 514, 516, 149, 518, 604, 520, 605, 150, 1053, 151, 152, 607, 153, 608, 1057, 20, 683, 22, 23, 154, 1076, 20, 155, 1243, 798, 1125, 11, 11, 11, 11, 1048, 1052, 130, 701, 315, 1234, 20, 11, 11, 738, 739, 156, 1220, 11, 1300, 1305, 1267, 1297, 610, 1312, 611, 43, 7, 1145, 8, 1281, 1077, 43, 157, 1197, 158, 508, 1176, 46, 1083, 1293, 1302, 1308, 1152, 125, 49, 1158, 43, 1291, 1236, 524, 526, 528, 530, 532, 1198, 534, 536, 538, 540, 1259, 1235, 544, 546, 787, 616, 619, 617, 620, 7, 1135, 8, 315, 1286, 692, 273, 9, 1296, 572, 1311, 691, 622, 1298, 623, 1313, 1221, 689, 92, 1034, 315, 1035, 845, 858, 1307, 274, 894, 10, 823, 292, 11, 292, 292, 292, 1176, 292, 1038, 292, 1039, 364, 394, 824, 396, 693, 398, 825, 951, 844, 857, 1185, 58, 893, 274, 404, 744, 406, 1186, 408, 1041, 41, 1042, 1054, 1085, 1055, 1086, 1155, 92, 1156, 11, 826, 92, 809, 827, 59, 849, 830, 870, 884, 900, 911, 831, 832, 1160, 1163, 1161, 1164, 92, 92, 425, 1111, 946, 
1111, 714, 1029, 716, 805, 814, 821, 840, 522, 863, 875, 889, 907, 918, 926, 841, 854, 1031, 1218, 890, 908, 791, 927, 792, 1044, 767, 11, 296, 11, 793, 92, 92, 768, 1140, 842, 855, 315, 769, 891, 909, 770, 928, 771, 1134, 292, 772, 296, 625, 778, 965, 92, 92, 168, 1207, 1166, 627, 804, 813, 820, 839, 853, 862, 874, 888, 906, 917, 925, 929, 902, 930, 776, 1118, 1153, 641, 789, 700, 796, 799, 802, 811, 818, 837, 851, 860, 872, 886, 904, 915, 923, 806, 816, 833, 846, 753, 867, 879, 895, 694, 921, 1260, 642, 940, 349, 940, 1294, 1303, 1309, 1037, 756, 19, 20, 1295, 777, 1310, 1101, 931, 790, 145, 797, 800, 803, 812, 819, 838, 852, 861, 873, 887, 905, 916, 924, 0, 7, 431, 8, 760, 0, 0, 273, 147, 17, 0, 148, 941, 20, 941, 43, 17, 0, 743, 19, 703, 46, 149, 126, 130, 0, 48, 945, 704, 151, 152, 0, 153, 0, 761, 762, 0, 133, 0, 154, 33, 34, 35, 0, 133, 0, 958, 42, 20, 43, 0, 0, 0, 775, 145, 46, 0, 149, 128, 130, 0, 156, 150, 141, 0, 0, 47, 48, 0, 934, 935, 0, 0, 92, 92, 146, 147, 155, 157, 148, 158, 20, 132, 20, 43, 22, 23, 836, 133, 0, 46, 0, 130, 128, 1292, 134, 0, 151, 152, 135, 153, 0, 0, 0, 748, 0, 1073, 154, 11, 11, 0, 956, 0, 11, 547, 548, 145, 43, 0, 43, 0, 17, 922, 46, 554, 20, 555, 556, 0, 156, 0, 141, 0, 557, 558, 0, 130, 559, 147, 0, 0, 148, 0, 20, 0, 33, 157, 0, 158, 133, 0, 0, 149, 292, 0, 1171, 0, 794, 0, 151, 152, 43, 153, 315, 315, 0, 0, 46, 0, 154, 0, 0, 292, 683, 0, 141, 0, 17, 0, 43, 19, 0, 11, 11, 0, 46, 0, 14, 128, 0, 1068, 156, 0, 0, 0, 0, 0, 0, 0, 801, 0, 33, 34, 35, 28, 0, 0, 0, 157, 1069, 158, 0, 0, 0, 132, 0, 0, 850, 0, 92, 92, 92, 92, 92, 92, 126, 39, 134, 48, 0, 0, 135, 0, 0, 44, 0, 0, 0, 0, 11, -166, 0, 0, 1010, 1011, 11, 0, 0, 0, 11, 0, 0, 11, 0, 315, 1306, 1133, 1139, 0, 0, 11, 0, 0, 0, 648, 0, 0, 649, 650, 0, 0, 651, 1191, 0, 652, 0, 0, 653, 0, 0, 654, 0, 0, 655, 1024, 1026, 656, 0, 0, 657, 0, 0, 658, 0, 0, 659, 1184, 0, 660, 0, 0, 661, 0, 0, 662, 663, 664, 665, 1132, 1138, 666, 0, 0, 667, 0, 11, 1261, 0, 17, 0, 11, 19, 20, 0, 0, 0, 0, 0, 0, 
696, 1130, 1136, 0, 130, 1146, 1150, 0, 0, 0, 0, 0, 0, 33, 34, 35, 0, 133, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 0, 43, 0, 730, 731, 732, 1314, 1228, 1233, 0, 0, 0, 1172, 1182, 733, 1131, 1137, 0, 0, 1147, 1151, 0, 0, 0, 92, 0, 0, 745, 0, 0, 0, 0, 1092, 1093, 1094, 1095, 1096, 1097, 0, 1181, 315, 0, 773, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1173, 1183, 0, 1219, 0, 1227, 1232, 1299, 1304, 1045, 1049, 0, 0, 11, 11, 11, 11, 0, 0, 0, 936, 937, 0, 0, 1172, 1216, 1222, 1225, 1230, 0, 0, 0, 1114, 315, 1120, 1122, 1124, 0, 0, 0, 0, 0, 0, 0, 0, 959, 0, 0, 961, 962, 963, 0, 0, 0, 0, 0, 0, 0, 0, 966, 0, 0, 0, 0, 0, 0, 1173, 1217, 1223, 1226, 1231, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 967, 968, 0, 0, 969, 0, 1108, 0, 1115, 970, 971, 972, 0, 0, 0, 0, 973, 0, 0, 0, 0, 0, 0, 974, 0, 0, 0, 0, 975, 0, 0, 0, 0, 0, 0, 976, 977, 978, 979, 980, 981, 982, 0, 0, 983, 0, 0, 0, 0, 984, 985, 986, 987, 988, 0, 1240, 0, 0, 989, 990, 0, 0, 991, 0, 0, 992, 0, 0, 0, 0, 993, 994, 0, 0, 995, 0, 0, 0, 0, 996, 0, 0, 0, 0, 997, 998, 999, 1000, 1001, 1002, 1003, 0, 0, 0, 0, 1004, 0, 0, 0, 0, 0, 0, 1005, 1006, 1007, 1008, 1009, 0, 0, 0, 0, 0, 0, 1012, 1013, 1014, 1015, 449, 0, 0, 0, 1016, 1017, 1018, 1019, 1020, 450, 0, 0, 0, 452, 0, 453, 145, 454, 0, 455, 0, 0, 0, 456, 0, 0, 0, 0, 458, 0, 0, 0, 0, 0, 0, 462, 0, 146, 147, 464, 0, 148, 0, 20, 466, 0, 0, 0, 468, 0, 0, 0, 0, 471, 130, 472, 0, 0, 473, 151, 152, 0, 153, 480, 0, 0, 0, 482, 0, 154, 484, 0, 485, 0, 0, 0, 0, 488, 0, 43, 0, 490, 0, 0, 0, 46, 0, 494, 0, 0, 495, 156, 0, 141, 498, 0, 0, 178, 0, 0, 499, 0, 0, 145, 501, 0, 0, 1075, 157, 0, 158, 0, 0, 0, 0, 0, 1079, 1214, 1080, 1081, 0, 0, 1082, 0, 0, 147, 17, 0, 148, 0, 20, 1089, 1090, 0, 0, 0, 0, 0, 0, 149, 0, 130, 1098, 0, 0, 32, 151, 152, 0, 153, 0, 34, 35, 0, 133, 414, 154, 37, 0, 0, 419, 1104, 0, 1105, 0, 0, 43, 0, 0, 0, 0, 0, 46, 0, 0, 128, 47, 0, 156, 0, 141, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 157, 0, 158, 0, 0, 0, 145, 0, 0, 885, 0, 0, 0, 0, 0, 0, 1169, 0, 0, 0, 0, 0, 1170, 0, 0, 0, 0, 146, 
147, 17, 0, 148, 19, 20, 0, 1192, 0, 0, 1193, 0, 0, 1194, 0, 1195, 130, 0, 1196, 0, 0, 151, 152, 0, 153, 33, 0, 0, 0, 0, 1199, 1200, 1201, 1202, 1203, 1204, 0, 0, 0, 1205, 0, 43, 0, 432, 0, 0, 1206, 46, 0, 0, 434, 0, 0, 0, 0, 141, 0, 0, 0, 444, 445, 0, 0, 447, 448, 0, 0, 0, 0, 0, 0, 0, 158, 1239, 0, 0, 0, 0, 0, 817, 0, 0, 0, 1241, 0, 0, 0, 0, 1242, 0, 0, 457, 0, 0, 0, 0, 0, 0, 460, 461, 0, 145, 0, 463, 1262, 0, 0, 465, 0, 0, 0, 0, 0, 467, 0, 0, 469, 470, 0, 0, 0, 0, 0, 147, 1282, 0, 148, 1283, 20, 0, 1284, 481, 0, 1285, 0, 483, 0, 149, 0, 130, 486, 487, 0, 0, 151, 152, 0, 153, 0, 491, 492, 493, 133, 0, 1315, 0, 0, 0, 496, 1316, 0, 0, 1317, 0, 43, 0, 0, 0, 500, 0, 46, 0, 0, 128, 504, 0, 156, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 158, 0, 0, 0, 0, 0, 0, 903, 0, 0, 0, 0, 0, 549, 550, 0, 551, 0, 552, 0, 0, 0, 0, 0, 0, 0, 0, 0, -4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 560, 0, 0, 0, 0, 0, 0, 0, 561, 949, 12, 13, 14, 15, 16, 0, 0, 17, 18, 0, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 0, 29, -753, 30, 31, 0, 32, 0, 566, -754, 0, 33, 34, 35, 0, -754, 36, 0, 37, 38, 0, 39, -754, 40, 41, 42, -754, 43, 145, 44, -756, 45, 0, 46, 145, -751, -752, 47, 48, 0, 49, -755, 50, 51, 0, 0, 0, 0, 0, 0, 147, 0, 0, 148, 0, 20, 147, 52, 0, 148, 0, 0, 53, 0, 145, 0, 130, 0, 0, 0, 149, 151, 152, 0, 153, 0, 0, 151, 152, 0, 153, 0, 0, 0, 0, 133, 147, 647, 0, 148, 0, 43, 0, 0, 0, 0, 0, 46, 0, 0, 149, 0, 0, 156, 0, 141, 128, 151, 152, 156, 153, 0, 0, 0, 0, 133, 145, 0, 0, 0, 0, 0, 158, 0, 0, 0, 0, 0, 158, 810, 0, 0, 0, 1058, 0, 668, 128, 0, 147, 156, 0, 148, 0, 0, 0, 0, 0, 1059, 1060, 685, 1061, 0, 149, 1062, 0, 0, 0, 0, 158, 151, 152, 0, 153, 0, 0, 681, 0, 17, 0, 154, 19, 20, 0, 0, 1030, 0, 0, 0, 0, 0, 0, 0, 130, 0, 1063, 0, 0, 0, 128, 0, 0, 156, 34, 35, 0, 133, 0, 0, 37, 0, 0, 734, 0, 736, 0, 0, 0, 43, 0, 0, 158, 0, 0, 46, 0, 126, 0, 0, 48, 0, 0, 141, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 871, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 179, 0, 0, 0, 947, 948, 
181, 310, 0, 0, 0, 0, 0, 0, 0, 952, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 0, 0, 0, 0, 0, 0, 682, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 179, 0, 0, 0, 0, 0, 181, 310, 0, 0, 0, 0, 0, 0, 1021, 1022, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 0, 0, 0, 0, 0, 0, 1128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1078, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1084, 0, 0, 0, 1088, 0, 0, 0, 0, 1091, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1099, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 179, 0, 0, 0, 0, 0, 0, 273, 0, 1154, 0, 0, 0, 0, 0, 1159, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 754, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 0, 248, 0, 0, 0, 0, 253, 0, 0, 0, 0, 258, 259, 260, 0, 0, 0, 0, 0, 0, 755, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 1263, 0, 0, 1264, 179, 1265, 0, 1266, 180, 286, 181, 287, 288, 0, 0, 0, 289, 290, 291, 0, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 179, 0, 0, 0, 429, 286, 181, 287, 288, 0, 0, 0, 289, 290, 291, 0, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 179, 0, 0, 0, 180, 0, 181, 273, 0, 0, 0, 0, 0, 0, 0, 0, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 179, 0, 0, 0, 180, 0, 181, 310, 0, 0, 0, 0, 0, 0, 0, 0, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 179, 0, 0, 0, 0, 0, 181, 310, 0, 0, 477, 0, 0, 0, 0, 0, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 
198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 179, 0, 0, 0, 0, 0, 181, 310, 0, 0, 1236, 0, 0, 0, 0, 0, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 179, 0, 0, 0, 180, 0, 181, 0, 0, 0, 0, 0, 0, 0, 0, 0, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 179, 0, 0, 0, 0, 0, 181, 310, 0, 0, 0, 0, 0, 0, 0, 0, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260 }; static const yytype_int16 yycheck[] = { 4, 726, 89, 725, 726, 9, 10, 711, 12, 89, 14, 15, 105, 17, 18, 19, 20, 5, 22, 23, 724, 725, 89, 5, 28, 559, 89, 89, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 5, 722, 52, 706, 89, 86, 719, 89, 721, 5, 89, 724, 711, 8, 8, 89, 89, 725, 726, 393, 8, 75, 5, 17, 12, 
724, 725, 96, 85, 56, 96, 23, 5, 8, 7, 84, 104, 64, 106, 8, 90, 99, 100, 101, 102, 103, 420, 105, 114, 1106, 1107, 109, 632, 111, 0, 82, 8, 115, 116, 89, 8, 76, 120, 121, 5, 123, 8, 766, 5, 17, 7, 5, 14, 7, 11, 42, 126, 11, 128, 5, 130, 7, 132, 133, 134, 135, 5, 104, 7, 8, 82, 141, 5, 143, 7, 145, 146, 147, 148, 149, 94, 151, 152, 153, 154, 81, 156, 157, 158, 107, 108, 107, 107, 88, 720, 87, 722, 723, 93, 177, 726, 727, 5, 92, 7, 104, 95, 106, 178, 9, 10, 104, 12, 106, 14, 15, 88, 17, 18, 19, 20, 93, 22, 23, 8, 5, 83, 7, 28, 70, 8, 11, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 14, 105, 52, 100, 101, 102, 103, 22, 104, 20, 106, 5, 109, 7, 111, 711, 31, 11, 115, 116, 31, 104, 104, 106, 121, 721, 123, 42, 724, 725, 719, 31, 721, 722, 683, 724, 725, 726, 8, 104, 21, 951, 42, 104, 104, 31, 107, 33, 34, 104, 31, 104, 67, 106, 88, 5, 67, 7, 73, 93, 88, 42, 73, 94, 95, 93, 97, 67, 5, 5, 7, 7, 8, 73, 11, 105, 126, 104, 128, 8, 130, 67, 132, 133, 134, 135, 67, 68, 17, 18, 105, 141, 73, 143, 105, 145, 146, 147, 148, 149, 81, 151, 152, 153, 154, 105, 156, 157, 158, 758, 759, 105, 104, 8, 106, 85, 104, 87, 106, 105, 90, 16, 17, 18, 105, 96, 104, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 107, 393, 27, 31, 393, 30, 31, 30, 84, 104, 86, 106, 474, 89, 1077, 91, 397, 42, 399, 400, 401, 31, 403, 104, 405, 106, 51, 31, 420, 107, 55, 420, 55, 104, 28, 106, 64, 31, 104, 67, 106, 414, 67, 71, 107, 108, 419, 31, 73, 104, 75, 1142, 104, 78, 77, 78, 81, 67, 42, 432, 103, 434, 1125, 67, 1127, 59, 31, 71, 33, 34, 107, 444, 445, 67, 447, 448, 5, 105, 7, 63, 105, 103, 105, 67, 457, 725, 726, 460, 461, 73, 463, 105, 465, 474, 467, 105, 469, 470, 1142, 1028, 31, 105, 67, 565, 105, 104, 1142, 106, 481, 5, 483, 1142, 108, 486, 487, 104, 4, 106, 491, 492, 493, 105, 503, 496, 104, 54, 106, 500, 104, 24, 106, 504, 97, 105, 1197, 1198, 67, 25, 26, 109, 105, 29, 104, 32, 106, 74, 104, 397, 
106, 399, 400, 401, 40, 403, 104, 405, 106, 45, 104, 47, 48, 104, 50, 106, 105, 31, 628, 33, 34, 57, 105, 31, 60, 1236, 105, 85, 549, 550, 551, 552, 1010, 1011, 42, 645, 565, 1211, 31, 560, 561, 43, 44, 79, 37, 566, 1288, 1289, 1259, 1288, 104, 1290, 106, 67, 5, 1106, 7, 1268, 110, 67, 96, 111, 98, 395, 1141, 73, 104, 1288, 1289, 1290, 104, 62, 80, 104, 67, 1286, 14, 409, 410, 411, 412, 413, 107, 415, 416, 417, 418, 107, 112, 421, 422, 105, 104, 104, 106, 106, 5, 105, 7, 628, 107, 634, 11, 54, 1288, 503, 1290, 633, 104, 1288, 106, 1290, 105, 632, 635, 104, 645, 106, 722, 723, 1290, 393, 726, 74, 721, 397, 647, 399, 400, 401, 1207, 403, 104, 405, 106, 407, 127, 721, 129, 634, 131, 721, 752, 722, 723, 1142, 5, 726, 420, 140, 691, 142, 1142, 144, 104, 64, 106, 104, 104, 106, 106, 104, 683, 106, 685, 721, 687, 719, 721, 5, 722, 721, 724, 725, 726, 727, 721, 721, 104, 104, 106, 106, 703, 704, 175, 1092, 744, 1094, 652, 943, 654, 719, 720, 721, 722, 407, 724, 725, 726, 727, 728, 729, 722, 723, 946, 1208, 726, 727, 715, 729, 715, 964, 706, 734, 743, 736, 715, 738, 739, 706, 1103, 722, 723, 752, 706, 726, 727, 706, 729, 706, 1102, 503, 706, 762, 545, 711, 774, 758, 759, 89, 1192, 1123, 548, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 729, 726, 729, 711, 1094, 1111, 563, 715, 644, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 719, 720, 721, 722, 700, 724, 725, 726, 635, 728, 1244, 565, 742, 113, 744, 1288, 1289, 1290, 954, 702, 30, 31, 1288, 711, 1290, 1078, 735, 715, 4, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, -1, 5, 177, 7, 702, -1, -1, 11, 26, 27, -1, 29, 742, 31, 744, 67, 27, -1, 687, 30, 24, 73, 40, 75, 42, -1, 78, 743, 32, 47, 48, -1, 50, -1, 703, 704, -1, 55, -1, 57, 51, 52, 53, -1, 55, -1, 762, 65, 31, 67, -1, -1, -1, 105, 4, 73, -1, 40, 76, 42, -1, 79, 45, 81, -1, -1, 77, 78, -1, 738, 739, -1, -1, 912, 913, 25, 26, 60, 96, 29, 98, 31, 49, 31, 67, 33, 34, 105, 55, -1, 73, -1, 42, 76, 105, 62, -1, 47, 48, 66, 50, 
-1, -1, -1, 694, -1, 1028, 57, 947, 948, -1, 761, -1, 952, 423, 424, 4, 67, -1, 67, -1, 27, 105, 73, 433, 31, 435, 436, -1, 79, -1, 81, -1, 442, 443, -1, 42, 446, 26, -1, -1, 29, -1, 31, -1, 51, 96, -1, 98, 55, -1, -1, 40, 743, -1, 105, -1, 105, -1, 47, 48, 67, 50, 1010, 1011, -1, -1, 73, -1, 57, -1, -1, 762, 1101, -1, 81, -1, 27, -1, 67, 30, -1, 1021, 1022, -1, 73, -1, 22, 76, -1, 1028, 79, -1, -1, -1, -1, -1, -1, -1, 105, -1, 51, 52, 53, 39, -1, -1, -1, 96, 1028, 98, -1, -1, -1, 49, -1, -1, 105, -1, 1058, 1059, 1060, 1061, 1062, 1063, 75, 61, 62, 78, -1, -1, 66, -1, -1, 69, -1, -1, -1, -1, 1078, 75, -1, -1, 912, 913, 1084, -1, -1, -1, 1088, -1, -1, 1091, -1, 1101, 105, 1102, 1103, -1, -1, 1099, -1, -1, -1, 573, -1, -1, 576, 577, -1, -1, 580, 1142, -1, 583, -1, -1, 586, -1, -1, 589, -1, -1, 592, 934, 935, 595, -1, -1, 598, -1, -1, 601, -1, -1, 604, 1142, -1, 607, -1, -1, 610, -1, -1, 613, 614, 615, 616, 1102, 1103, 619, -1, -1, 622, -1, 1154, 1244, -1, 27, -1, 1159, 30, 31, -1, -1, -1, -1, -1, -1, 638, 1102, 1103, -1, 42, 1106, 1107, -1, -1, -1, -1, -1, -1, 51, 52, 53, -1, 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, 65, -1, 67, -1, 669, 670, 671, 1291, 1210, 1211, -1, -1, -1, 1141, 1142, 680, 1102, 1103, -1, -1, 1106, 1107, -1, -1, -1, 1220, -1, -1, 693, -1, -1, -1, -1, 1058, 1059, 1060, 1061, 1062, 1063, -1, 105, 1244, -1, 708, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1141, 1142, -1, 1208, -1, 1210, 1211, 1288, 1289, 1010, 1011, -1, -1, 1263, 1264, 1265, 1266, -1, -1, -1, 740, 741, -1, -1, 1207, 1208, 1209, 1210, 1211, -1, -1, -1, 1093, 1291, 1095, 1096, 1097, -1, -1, -1, -1, -1, -1, -1, -1, 765, -1, -1, 768, 769, 770, -1, -1, -1, -1, -1, -1, -1, -1, 779, -1, -1, -1, -1, -1, -1, 1207, 1208, 1209, 1210, 1211, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 805, 806, -1, -1, 809, -1, 1092, -1, 1094, 814, 815, 816, -1, -1, -1, -1, 821, -1, -1, -1, -1, -1, -1, 828, -1, -1, -1, -1, 833, -1, -1, -1, -1, -1, -1, 840, 841, 842, 843, 844, 845, 846, -1, -1, 849, -1, 
-1, -1, -1, 854, 855, 856, 857, 858, -1, 1220, -1, -1, 863, 864, -1, -1, 867, -1, -1, 870, -1, -1, -1, -1, 875, 876, -1, -1, 879, -1, -1, -1, -1, 884, -1, -1, -1, -1, 889, 890, 891, 892, 893, 894, 895, -1, -1, -1, -1, 900, -1, -1, -1, -1, -1, -1, 907, 908, 909, 910, 911, -1, -1, -1, -1, -1, -1, 918, 919, 920, 921, 284, -1, -1, -1, 926, 927, 928, 929, 930, 293, -1, -1, -1, 297, -1, 299, 4, 301, -1, 303, -1, -1, -1, 307, -1, -1, -1, -1, 312, -1, -1, -1, -1, -1, -1, 319, -1, 25, 26, 323, -1, 29, -1, 31, 328, -1, -1, -1, 332, -1, -1, -1, -1, 337, 42, 339, -1, -1, 342, 47, 48, -1, 50, 347, -1, -1, -1, 351, -1, 57, 354, -1, 356, -1, -1, -1, -1, 361, -1, 67, -1, 365, -1, -1, -1, 73, -1, 371, -1, -1, 374, 79, -1, 81, 378, -1, -1, 92, -1, -1, 384, -1, -1, 4, 388, -1, -1, 1029, 96, -1, 98, -1, -1, -1, -1, -1, 1038, 105, 1040, 1041, -1, -1, 1044, -1, -1, 26, 27, -1, 29, -1, 31, 1053, 1054, -1, -1, -1, -1, -1, -1, 40, -1, 42, 1064, -1, -1, 46, 47, 48, -1, 50, -1, 52, 53, -1, 55, 150, 57, 58, -1, -1, 155, 1083, -1, 1085, -1, -1, 67, -1, -1, -1, -1, -1, 73, -1, -1, 76, 77, -1, 79, -1, 81, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 96, -1, 98, -1, -1, -1, 4, -1, -1, 105, -1, -1, -1, -1, -1, -1, 1133, -1, -1, -1, -1, -1, 1139, -1, -1, -1, -1, 25, 26, 27, -1, 29, 30, 31, -1, 1152, -1, -1, 1155, -1, -1, 1158, -1, 1160, 42, -1, 1163, -1, -1, 47, 48, -1, 50, 51, -1, -1, -1, -1, 1175, 1176, 1177, 1178, 1179, 1180, -1, -1, -1, 1184, -1, 67, -1, 261, -1, -1, 1191, 73, -1, -1, 268, -1, -1, -1, -1, 81, -1, -1, -1, 277, 278, -1, -1, 281, 282, -1, -1, -1, -1, -1, -1, -1, 98, 1218, -1, -1, -1, -1, -1, 105, -1, -1, -1, 1228, -1, -1, -1, -1, 1233, -1, -1, 309, -1, -1, -1, -1, -1, -1, 316, 317, -1, 4, -1, 321, 1249, -1, -1, 325, -1, -1, -1, -1, -1, 331, -1, -1, 334, 335, -1, -1, -1, -1, -1, 26, 1269, -1, 29, 1272, 31, -1, 1275, 349, -1, 1278, -1, 353, -1, 40, -1, 42, 358, 359, -1, -1, 47, 48, -1, 50, -1, 367, 368, 369, 55, -1, 1299, -1, -1, -1, 376, 1304, -1, -1, 1307, -1, 67, -1, 
-1, -1, 386, -1, 73, -1, -1, 76, 392, -1, 79, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 98, -1, -1, -1, -1, -1, -1, 105, -1, -1, -1, -1, -1, 426, 427, -1, 429, -1, 431, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, 451, -1, -1, -1, -1, -1, -1, -1, 459, 749, 20, 21, 22, 23, 24, -1, -1, 27, 28, -1, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, 41, 42, 43, 44, -1, 46, -1, 489, 49, -1, 51, 52, 53, -1, 55, 56, -1, 58, 59, -1, 61, 62, 63, 64, 65, 66, 67, 4, 69, 70, 71, -1, 73, 4, 75, 76, 77, 78, -1, 80, 81, 82, 83, -1, -1, -1, -1, -1, -1, 26, -1, -1, 29, -1, 31, 26, 97, -1, 29, -1, -1, 102, -1, 4, -1, 42, -1, -1, -1, 40, 47, 48, -1, 50, -1, -1, 47, 48, -1, 50, -1, -1, -1, -1, 55, 26, 568, -1, 29, -1, 67, -1, -1, -1, -1, -1, 73, -1, -1, 40, -1, -1, 79, -1, 81, 76, 47, 48, 79, 50, -1, -1, -1, -1, 55, 4, -1, -1, -1, -1, -1, 98, -1, -1, -1, -1, -1, 98, 105, -1, -1, -1, 21, -1, 105, 76, -1, 26, 79, -1, 29, -1, -1, -1, -1, -1, 35, 36, 630, 38, -1, 40, 41, -1, -1, -1, -1, 98, 47, 48, -1, 50, -1, -1, 105, -1, 27, -1, 57, 30, 31, -1, -1, 944, -1, -1, -1, -1, -1, -1, -1, 42, -1, 72, -1, -1, -1, 76, -1, -1, 79, 52, 53, -1, 55, -1, -1, 58, -1, -1, 682, -1, 684, -1, -1, -1, 67, -1, -1, 98, -1, -1, 73, -1, 75, -1, -1, 78, -1, -1, 81, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 105, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, 746, 747, 10, 11, -1, -1, -1, -1, -1, -1, -1, 757, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, -1, -1, -1, -1, -1, -1, 105, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, 10, 11, -1, -1, -1, -1, -1, -1, 932, 933, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, -1, -1, -1, -1, -1, -1, 105, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1034, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1048, -1, -1, -1, 1052, -1, -1, -1, -1, 1057, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1076, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4, -1, -1, -1, -1, -1, -1, 11, -1, 1112, -1, -1, -1, -1, -1, 1118, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, -1, 86, -1, -1, -1, -1, 91, -1, -1, -1, -1, 96, 97, 98, -1, -1, -1, -1, -1, -1, 105, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1250, -1, -1, 1253, 4, 1255, -1, 1257, 8, 9, 10, 11, 12, -1, -1, -1, 16, 17, 18, -1, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 4, -1, -1, -1, 8, 9, 10, 11, 12, -1, -1, -1, 16, 17, 18, -1, 20, 21, 22, 23, 24, 25, 26, 27, 28, 
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 4, -1, -1, -1, 8, -1, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 4, -1, -1, -1, 8, -1, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 4, -1, -1, -1, -1, -1, 10, 11, -1, -1, 14, -1, -1, -1, -1, -1, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 4, -1, -1, -1, -1, -1, 10, 11, -1, -1, 14, -1, -1, -1, -1, -1, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 4, -1, -1, -1, 8, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 4, -1, -1, -1, -1, -1, 10, 11, -1, -1, -1, -1, -1, -1, -1, -1, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. */ static const yytype_uint16 yystos[] = { 0, 114, 120, 124, 419, 441, 0, 5, 7, 54, 74, 418, 20, 21, 22, 23, 24, 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 41, 43, 44, 46, 51, 52, 53, 56, 58, 59, 61, 63, 64, 65, 67, 69, 71, 73, 77, 78, 80, 82, 83, 97, 102, 130, 140, 142, 144, 147, 149, 151, 153, 170, 176, 190, 202, 214, 222, 227, 229, 238, 241, 243, 245, 247, 339, 342, 345, 347, 350, 353, 359, 362, 430, 431, 432, 433, 434, 435, 436, 437, 438, 418, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 420, 402, 402, 75, 194, 76, 192, 42, 183, 49, 55, 62, 66, 204, 209, 224, 356, 440, 81, 335, 70, 157, 4, 25, 26, 29, 40, 45, 47, 48, 50, 57, 60, 79, 96, 98, 249, 254, 257, 261, 264, 267, 273, 277, 279, 283, 299, 304, 305, 307, 308, 310, 439, 407, 420, 419, 4, 8, 10, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 115, 116, 119, 395, 425, 426, 427, 428, 123, 395, 169, 395, 11, 116, 189, 397, 428, 429, 141, 395, 88, 93, 116, 338, 340, 9, 11, 12, 16, 17, 18, 116, 148, 422, 424, 
425, 246, 422, 150, 422, 242, 422, 240, 422, 17, 116, 201, 203, 390, 11, 116, 361, 363, 396, 425, 88, 93, 116, 341, 343, 94, 116, 349, 351, 390, 18, 116, 346, 348, 390, 391, 129, 422, 92, 95, 116, 352, 354, 146, 422, 116, 226, 371, 372, 373, 116, 237, 239, 391, 116, 143, 394, 428, 344, 422, 152, 422, 88, 93, 116, 228, 230, 12, 116, 139, 160, 85, 87, 90, 116, 175, 177, 116, 358, 360, 369, 396, 244, 422, 16, 17, 18, 116, 221, 223, 392, 393, 213, 422, 403, 418, 429, 420, 402, 420, 402, 420, 402, 420, 420, 208, 420, 420, 402, 420, 402, 420, 402, 420, 420, 420, 420, 420, 419, 420, 420, 420, 420, 419, 420, 420, 420, 104, 104, 402, 104, 106, 417, 8, 408, 424, 419, 104, 419, 104, 104, 106, 171, 104, 106, 398, 399, 401, 419, 419, 104, 419, 419, 398, 398, 423, 398, 398, 398, 398, 398, 419, 398, 364, 419, 419, 398, 419, 398, 419, 398, 419, 398, 419, 419, 398, 398, 398, 107, 374, 375, 14, 377, 378, 398, 419, 398, 419, 398, 398, 419, 419, 398, 161, 398, 419, 419, 419, 398, 398, 419, 370, 398, 398, 419, 398, 404, 405, 419, 195, 397, 191, 395, 182, 422, 205, 422, 355, 422, 210, 422, 365, 422, 334, 422, 155, 160, 276, 395, 272, 395, 266, 395, 253, 395, 248, 395, 258, 395, 260, 395, 263, 395, 309, 395, 282, 397, 298, 395, 278, 395, 402, 402, 419, 419, 419, 419, 117, 402, 402, 402, 402, 402, 402, 419, 419, 396, 376, 107, 379, 419, 368, 104, 106, 410, 406, 422, 104, 106, 196, 104, 104, 106, 184, 104, 106, 206, 104, 106, 357, 104, 106, 211, 104, 106, 225, 104, 106, 336, 104, 106, 158, 104, 106, 280, 104, 106, 274, 104, 106, 268, 104, 106, 255, 104, 106, 250, 104, 104, 104, 104, 106, 311, 104, 106, 284, 104, 106, 302, 280, 306, 306, 416, 409, 103, 121, 122, 125, 126, 83, 173, 105, 400, 144, 382, 374, 378, 380, 396, 107, 366, 419, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 105, 192, 249, 252, 254, 257, 261, 264, 267, 277, 279, 283, 356, 105, 105, 396, 103, 419, 131, 82, 128, 130, 144, 131, 128, 142, 420, 105, 402, 105, 108, 381, 
382, 396, 411, 24, 32, 197, 198, 215, 217, 231, 233, 193, 105, 207, 207, 212, 207, 337, 159, 281, 275, 269, 256, 251, 259, 262, 265, 312, 285, 303, 402, 402, 402, 402, 419, 407, 419, 8, 43, 44, 132, 136, 145, 420, 145, 402, 88, 93, 116, 172, 174, 5, 421, 375, 54, 105, 403, 412, 414, 415, 427, 420, 420, 105, 190, 199, 200, 202, 204, 209, 224, 227, 229, 402, 232, 105, 151, 153, 176, 194, 245, 247, 105, 151, 153, 241, 243, 105, 105, 151, 153, 214, 241, 243, 105, 105, 151, 153, 105, 151, 153, 105, 151, 153, 176, 183, 335, 339, 342, 356, 105, 151, 153, 176, 183, 252, 335, 105, 151, 153, 176, 183, 247, 254, 257, 261, 264, 267, 270, 271, 273, 277, 279, 335, 339, 342, 105, 151, 153, 176, 183, 192, 249, 252, 299, 310, 335, 339, 345, 356, 105, 151, 153, 176, 192, 249, 252, 299, 310, 105, 151, 153, 176, 183, 194, 245, 247, 335, 339, 342, 356, 105, 151, 153, 176, 183, 194, 245, 247, 335, 339, 347, 350, 353, 356, 105, 151, 153, 176, 183, 192, 249, 252, 299, 310, 335, 339, 347, 350, 353, 356, 359, 362, 105, 151, 153, 176, 183, 192, 249, 252, 356, 21, 68, 105, 151, 153, 176, 183, 288, 293, 335, 105, 151, 153, 176, 183, 192, 249, 305, 308, 417, 8, 118, 420, 420, 402, 402, 147, 149, 151, 153, 154, 156, 127, 422, 154, 419, 419, 398, 383, 396, 419, 407, 407, 234, 395, 218, 422, 402, 194, 402, 402, 402, 216, 233, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 402, 420, 420, 402, 402, 402, 402, 402, 402, 402, 402, 402, 419, 419, 133, 395, 135, 395, 162, 163, 157, 398, 162, 109, 421, 104, 106, 413, 413, 104, 106, 235, 104, 106, 219, 217, 116, 291, 292, 369, 116, 286, 287, 369, 104, 104, 106, 137, 105, 21, 35, 36, 38, 41, 72, 164, 166, 179, 186, 192, 249, 252, 296, 301, 310, 314, 402, 105, 110, 419, 402, 402, 402, 402, 104, 419, 104, 106, 289, 419, 402, 402, 419, 420, 420, 420, 420, 420, 420, 402, 419, 421, 416, 236, 220, 
402, 402, 134, 138, 116, 294, 295, 366, 367, 165, 395, 116, 313, 316, 367, 178, 395, 185, 395, 300, 395, 85, 384, 389, 105, 105, 151, 153, 176, 183, 238, 105, 151, 153, 176, 183, 222, 297, 290, 105, 140, 144, 151, 153, 105, 140, 151, 153, 104, 368, 419, 104, 106, 167, 104, 419, 104, 106, 180, 104, 106, 187, 302, 421, 421, 402, 402, 105, 151, 153, 176, 183, 252, 273, 299, 310, 335, 105, 151, 153, 183, 247, 339, 342, 345, 347, 350, 356, 402, 402, 402, 402, 402, 111, 107, 402, 402, 402, 402, 402, 402, 402, 402, 297, 168, 315, 181, 188, 421, 421, 105, 105, 151, 153, 170, 176, 37, 105, 151, 153, 105, 151, 153, 176, 183, 105, 151, 153, 176, 183, 190, 112, 14, 385, 386, 402, 420, 402, 402, 421, 387, 84, 86, 89, 91, 317, 318, 319, 321, 322, 323, 326, 327, 330, 331, 107, 386, 396, 402, 419, 419, 419, 419, 421, 388, 104, 106, 320, 104, 106, 324, 104, 106, 328, 104, 106, 332, 421, 402, 402, 402, 402, 107, 105, 325, 329, 333, 421, 105, 245, 247, 339, 342, 347, 350, 356, 359, 105, 245, 247, 356, 359, 105, 194, 245, 247, 339, 342, 347, 350, 396, 402, 402, 402 }; /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. 
*/ static const yytype_uint16 yyr1[] = { 0, 113, 114, 114, 114, 115, 116, 117, 118, 117, 119, 120, 121, 122, 122, 122, 122, 123, 124, 125, 126, 126, 126, 127, 128, 129, 130, 131, 131, 131, 132, 133, 134, 134, 134, 134, 134, 135, 136, 137, 137, 138, 138, 138, 138, 139, 140, 141, 142, 143, 144, 145, 145, 145, 145, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 156, 157, 158, 158, 159, 159, 159, 161, 160, 160, 162, 163, 163, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 165, 166, 167, 167, 168, 168, 168, 168, 168, 169, 170, 171, 171, 172, 173, 173, 174, 174, 174, 175, 176, 177, 177, 177, 177, 178, 179, 180, 180, 181, 181, 181, 181, 181, 182, 183, 184, 184, 185, 186, 187, 187, 188, 188, 188, 188, 188, 188, 189, 190, 191, 192, 193, 193, 193, 193, 193, 193, 193, 194, 195, 196, 196, 197, 197, 197, 198, 198, 198, 198, 198, 198, 198, 198, 198, 199, 200, 201, 202, 203, 203, 204, 205, 206, 206, 207, 207, 207, 207, 207, 208, 209, 210, 211, 211, 212, 212, 212, 212, 212, 212, 213, 214, 215, 216, 216, 217, 218, 219, 219, 220, 220, 220, 220, 220, 220, 221, 222, 223, 223, 224, 225, 225, 226, 227, 228, 229, 230, 230, 230, 231, 232, 232, 233, 234, 235, 235, 236, 236, 236, 236, 236, 236, 237, 238, 239, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 250, 251, 251, 251, 251, 251, 251, 251, 251, 251, 252, 252, 252, 252, 252, 252, 252, 252, 253, 254, 255, 255, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 257, 258, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 260, 261, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 263, 264, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 266, 267, 268, 268, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 270, 270, 271, 271, 271, 271, 271, 271, 271, 272, 273, 274, 274, 275, 275, 275, 275, 275, 275, 275, 276, 277, 278, 279, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 283, 284, 284, 285, 285, 285, 285, 285, 
285, 285, 285, 286, 286, 287, 288, 289, 289, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 291, 291, 292, 293, 294, 294, 295, 296, 297, 297, 297, 297, 297, 297, 297, 297, 297, 297, 298, 299, 300, 301, 302, 302, 303, 303, 303, 303, 303, 303, 303, 303, 303, 304, 305, 306, 306, 306, 306, 306, 307, 308, 309, 310, 311, 311, 312, 312, 312, 312, 312, 312, 312, 312, 312, 313, 314, 315, 315, 315, 315, 316, 316, 317, 317, 318, 319, 320, 320, 321, 321, 321, 322, 323, 324, 324, 325, 325, 325, 325, 325, 325, 325, 325, 325, 326, 327, 328, 328, 329, 329, 329, 329, 329, 330, 331, 332, 332, 333, 333, 333, 333, 333, 333, 333, 333, 334, 335, 336, 336, 337, 337, 337, 338, 339, 340, 340, 340, 341, 342, 343, 343, 343, 344, 345, 346, 347, 348, 348, 349, 350, 351, 351, 351, 352, 353, 354, 354, 354, 355, 356, 357, 357, 358, 359, 360, 360, 361, 362, 364, 363, 363, 365, 366, 367, 368, 368, 370, 369, 372, 371, 373, 371, 371, 374, 375, 376, 376, 377, 378, 379, 379, 380, 381, 381, 382, 382, 383, 384, 385, 386, 387, 387, 388, 388, 389, 390, 391, 391, 392, 392, 393, 393, 394, 394, 395, 395, 396, 396, 397, 397, 397, 398, 398, 399, 400, 401, 402, 402, 402, 403, 404, 405, 406, 406, 407, 407, 408, 408, 408, 409, 409, 410, 410, 411, 411, 412, 412, 412, 413, 413, 414, 415, 416, 416, 417, 417, 418, 418, 419, 419, 420, 421, 421, 423, 422, 422, 424, 424, 424, 424, 424, 424, 424, 425, 425, 425, 426, 426, 426, 426, 426, 426, 426, 426, 426, 426, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 438, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 439, 440, 440, 440, 440, 441, 441, 
441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441, 441 }; /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */ static const yytype_uint8 yyr2[] = { 0, 2, 1, 1, 1, 1, 3, 0, 0, 6, 1, 13, 1, 0, 2, 2, 2, 1, 13, 1, 0, 2, 3, 1, 4, 1, 4, 0, 3, 3, 7, 1, 0, 2, 2, 2, 2, 1, 4, 1, 4, 0, 2, 2, 2, 1, 4, 1, 7, 1, 4, 0, 2, 2, 2, 2, 1, 4, 1, 4, 1, 4, 1, 4, 1, 1, 0, 3, 4, 1, 4, 0, 2, 2, 0, 3, 1, 1, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 4, 0, 3, 2, 2, 2, 1, 4, 1, 4, 1, 0, 4, 2, 2, 1, 1, 4, 2, 2, 2, 1, 1, 4, 1, 4, 0, 3, 2, 2, 2, 1, 4, 1, 3, 1, 4, 1, 4, 0, 2, 3, 2, 2, 2, 1, 4, 1, 7, 0, 3, 2, 2, 2, 2, 2, 4, 1, 1, 4, 1, 1, 1, 0, 2, 2, 2, 3, 3, 2, 3, 3, 2, 0, 1, 4, 2, 1, 4, 1, 1, 4, 0, 2, 2, 2, 2, 1, 4, 1, 1, 4, 0, 2, 2, 2, 2, 2, 1, 4, 3, 0, 3, 4, 1, 1, 4, 0, 3, 2, 2, 2, 2, 1, 4, 2, 1, 4, 1, 4, 1, 4, 1, 4, 2, 2, 1, 2, 0, 2, 5, 1, 1, 4, 0, 3, 2, 2, 2, 2, 1, 4, 2, 1, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 0, 2, 2, 2, 3, 3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 4, 0, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 7, 1, 0, 3, 3, 3, 2, 3, 2, 2, 2, 2, 2, 2, 1, 7, 0, 3, 3, 3, 2, 2, 3, 2, 2, 2, 2, 2, 2, 2, 1, 7, 0, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 1, 4, 1, 4, 0, 3, 3, 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 4, 0, 3, 3, 2, 2, 2, 3, 1, 4, 1, 4, 1, 4, 0, 3, 3, 3, 2, 2, 2, 2, 2, 1, 4, 1, 4, 0, 3, 3, 2, 2, 2, 3, 3, 2, 1, 1, 4, 1, 4, 0, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 7, 2, 1, 1, 7, 0, 3, 3, 2, 2, 2, 3, 3, 3, 3, 1, 4, 1, 4, 1, 4, 0, 3, 2, 2, 2, 3, 3, 3, 3, 2, 5, 0, 3, 3, 3, 3, 2, 5, 1, 4, 1, 4, 0, 3, 3, 2, 2, 2, 3, 3, 3, 1, 7, 0, 2, 2, 5, 2, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 3, 1, 4, 0, 2, 3, 2, 2, 2, 2, 2, 2, 1, 3, 1, 4, 0, 2, 3, 2, 2, 1, 3, 1, 4, 0, 3, 2, 2, 2, 2, 2, 2, 1, 4, 1, 4, 0, 2, 2, 1, 4, 2, 2, 1, 1, 4, 2, 2, 1, 1, 4, 1, 4, 2, 1, 1, 4, 2, 2, 1, 1, 4, 2, 2, 1, 1, 4, 1, 4, 1, 4, 2, 1, 1, 4, 0, 3, 1, 1, 2, 2, 0, 
2, 0, 3, 0, 2, 0, 2, 1, 3, 2, 0, 2, 3, 2, 0, 2, 2, 0, 2, 0, 5, 5, 5, 4, 4, 0, 2, 0, 5, 5, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 2, 1, 2, 4, 1, 1, 1, 0, 2, 2, 3, 2, 1, 0, 1, 0, 2, 0, 2, 3, 0, 5, 1, 4, 0, 3, 1, 3, 3, 1, 4, 1, 1, 0, 4, 2, 5, 1, 1, 0, 2, 2, 0, 1, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 3, 4, 4, 4, 4, 4 }; #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY (-2) #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY) \ { \ yychar = (Token); \ yylval = (Value); \ YYPOPSTACK (yylen); \ yystate = *yyssp; \ goto yybackup; \ } \ else \ { \ yyerror (&yylloc, scanner, param, YY_("syntax error: cannot back up")); \ YYERROR; \ } \ while (0) /* Error token number */ #define YYTERROR 1 #define YYERRCODE 256 /* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. If N is 0, then set CURRENT to the empty location which ends the previous symbol: RHS[0] (always defined). 
*/
/* NOTE(review): this is Bison-generated parser skeleton code — do not
   hand-edit; regenerate from the grammar (.y) source instead.  */
#ifndef YYLLOC_DEFAULT
/* Compute the location of the LHS symbol (Current) from the locations of
   the N RHS symbols.  With an empty RHS (N == 0) the location collapses
   to the end of the previous symbol, RHS[0] (always defined).  */
# define YYLLOC_DEFAULT(Current, Rhs, N)                                \
    do                                                                  \
      if (N)                                                            \
        {                                                               \
          (Current).first_line   = YYRHSLOC (Rhs, 1).first_line;        \
          (Current).first_column = YYRHSLOC (Rhs, 1).first_column;      \
          (Current).last_line    = YYRHSLOC (Rhs, N).last_line;         \
          (Current).last_column  = YYRHSLOC (Rhs, N).last_column;       \
        }                                                               \
      else                                                              \
        {                                                               \
          (Current).first_line   = (Current).last_line   =              \
            YYRHSLOC (Rhs, 0).last_line;                                \
          (Current).first_column = (Current).last_column =              \
            YYRHSLOC (Rhs, 0).last_column;                              \
        }                                                               \
    while (0)
#endif

/* K-th right-hand-side location in the location-stack slice Rhs.  */
#define YYRHSLOC(Rhs, K) ((Rhs)[K])


/* Enable debugging if requested.  */
#if YYDEBUG

# ifndef YYFPRINTF
#  include <stdio.h> /* INFRINGES ON USER NAME SPACE */
#  define YYFPRINTF fprintf
# endif

/* Emit trace output only when the global yydebug flag is set.  */
# define YYDPRINTF(Args)                        \
do {                                            \
  if (yydebug)                                  \
    YYFPRINTF Args;                             \
} while (0)

/* YY_LOCATION_PRINT -- Print the location on the stream.
   This macro was not mandated originally: define only if we know
   we won't break user code: when these are the locations we know.  */

#ifndef YY_LOCATION_PRINT
# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL

/* Print *YYLOCP on YYO.  Private, do not rely on its existence.
   Returns the number of characters printed.  Negative line/column
   components are suppressed; the end column is printed as
   last_column - 1 (clamped at 0).  */

YY_ATTRIBUTE_UNUSED
static int
yy_location_print_ (FILE *yyo, YYLTYPE const * const yylocp)
{
  int res = 0;
  int end_col = 0 != yylocp->last_column ? yylocp->last_column - 1 : 0;
  if (0 <= yylocp->first_line)
    {
      res += YYFPRINTF (yyo, "%d", yylocp->first_line);
      if (0 <= yylocp->first_column)
        res += YYFPRINTF (yyo, ".%d", yylocp->first_column);
    }
  if (0 <= yylocp->last_line)
    {
      if (yylocp->first_line < yylocp->last_line)
        {
          res += YYFPRINTF (yyo, "-%d", yylocp->last_line);
          if (0 <= end_col)
            res += YYFPRINTF (yyo, ".%d", end_col);
        }
      else if (0 <= end_col && yylocp->first_column < end_col)
        res += YYFPRINTF (yyo, "-%d", end_col);
    }
  return res;
}

#  define YY_LOCATION_PRINT(File, Loc)          \
  yy_location_print_ (File, &(Loc))

# else
#  define YY_LOCATION_PRINT(File, Loc) ((void) 0)
# endif
#endif


/* Trace one symbol (token or nonterminal) with its value and location,
   only when yydebug is set.  Relies on the local names `scanner' and
   `param' being in scope at the expansion site (inside yyparse).  */
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)                    \
do {                                                                      \
  if (yydebug)                                                            \
    {                                                                     \
      YYFPRINTF (stderr, "%s ", Title);                                   \
      yy_symbol_print (stderr,                                            \
                  Type, Value, Location, scanner, param); \
      YYFPRINTF (stderr, "\n");                                           \
    }                                                                     \
} while (0)


/*-----------------------------------.
| Print this symbol's value on YYO.  |
`-----------------------------------*/

/* Debug helper: print the semantic value of a symbol.  With no YYPRINT
   override everything here reduces to YYUSE no-ops, so the body exists
   mainly to pacify unused-parameter warnings.  */
static void
yy_symbol_value_print (FILE *yyo,
                       int yytype, YYSTYPE const * const yyvaluep,
                       YYLTYPE const * const yylocationp,
                       void *scanner, struct yang_parameter *param)
{
  FILE *yyoutput = yyo;
  YYUSE (yyoutput);
  YYUSE (yylocationp);
  YYUSE (scanner);
  YYUSE (param);
  if (!yyvaluep)
    return;
# ifdef YYPRINT
  if (yytype < YYNTOKENS)
    YYPRINT (yyo, yytoknum[yytype], *yyvaluep);
# endif
  YYUSE (yytype);
}


/*---------------------------.
| Print this symbol on YYO.  |
`---------------------------*/

/* Debug helper: print "token"/"nterm", the symbol's grammar name from
   yytname, its location, and its value.  */
static void
yy_symbol_print (FILE *yyo,
                 int yytype, YYSTYPE const * const yyvaluep,
                 YYLTYPE const * const yylocationp,
                 void *scanner, struct yang_parameter *param)
{
  YYFPRINTF (yyo, "%s %s (",
             yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);

  YY_LOCATION_PRINT (yyo, *yylocationp);
  YYFPRINTF (yyo, ": ");
  yy_symbol_value_print (yyo, yytype, yyvaluep, yylocationp, scanner, param);
  YYFPRINTF (yyo, ")");
}

/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included).                                                   |
`------------------------------------------------------------------*/

/* Debug aid: dump every state number currently on the state stack to
   stderr, bottom to top inclusive.  Used only via YY_STACK_PRINT.  */
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
{
  YYFPRINTF (stderr, "Stack now");
  for (; yybottom <= yytop; yybottom++)
    {
      int yybot = *yybottom;
      YYFPRINTF (stderr, " %d", yybot);
    }
  YYFPRINTF (stderr, "\n");
}

# define YY_STACK_PRINT(Bottom, Top)                            \
do {                                                            \
  if (yydebug)                                                  \
    yy_stack_print ((Bottom), (Top));                           \
} while (0)


/*------------------------------------------------.
| Report that the YYRULE is going to be reduced.  |
`------------------------------------------------*/

/* Debug aid: print the rule number being reduced, its line in the
   grammar file (from yyrline), and each RHS symbol about to be popped
   from the value/location stacks.  */
static void
yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, YYLTYPE *yylsp,
                 int yyrule, void *scanner, struct yang_parameter *param)
{
  unsigned long yylno = yyrline[yyrule];
  int yynrhs = yyr2[yyrule];  /* number of symbols on this rule's RHS */
  int yyi;
  YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
             yyrule - 1, yylno);
  /* The symbols being reduced.  */
  for (yyi = 0; yyi < yynrhs; yyi++)
    {
      YYFPRINTF (stderr, " $%d = ", yyi + 1);
      yy_symbol_print (stderr,
                       yystos[yyssp[yyi + 1 - yynrhs]],
                       &(yyvsp[(yyi + 1) - (yynrhs)])
                       , &(yylsp[(yyi + 1) - (yynrhs)])
                       , scanner, param);
      YYFPRINTF (stderr, "\n");
    }
}

# define YY_REDUCE_PRINT(Rule)                  \
do {                                            \
  if (yydebug)                                  \
    yy_reduce_print (yyssp, yyvsp, yylsp, Rule, scanner, param); \
} while (0)

/* Nonzero means print parse trace.  It is left uninitialized so that
   multiple parsers can coexist.  */
int yydebug;
#else /* !YYDEBUG */
/* Debugging disabled: all trace macros compile to nothing.  */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !YYDEBUG */


/* YYINITDEPTH -- initial size of the parser's stacks.  */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif

/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
   if the built-in stack extension method is used).
   Do not make this value too large; the results are undefined if
   YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
   evaluated with infinite-precision integer arithmetic.  */

#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif


#if YYERROR_VERBOSE

# ifndef yystrlen
#  if defined __GLIBC__ && defined _STRING_H
#   define yystrlen strlen
#  else
/* Return the length of YYSTR.  Fallback used when glibc's strlen
   cannot be assumed; identical observable behavior.  */
static YYSIZE_T
yystrlen (const char *yystr)
{
  YYSIZE_T yylen;
  for (yylen = 0; yystr[yylen]; yylen++)
    continue;
  return yylen;
}
#  endif
# endif

# ifndef yystpcpy
#  if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
#   define yystpcpy stpcpy
#  else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
   YYDEST.  Fallback for platforms without stpcpy.  */
static char *
yystpcpy (char *yydest, const char *yysrc)
{
  char *yyd = yydest;
  const char *yys = yysrc;

  while ((*yyd++ = *yys++) != '\0')
    continue;

  return yyd - 1;
}
#  endif
# endif

# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
   quotes and backslashes, so that it's suitable for yyerror.  The
   heuristic is that double-quoting is unnecessary unless the string
   contains an apostrophe, a comma, or backslash (other than
   backslash-backslash).  YYSTR is taken from yytname.  If YYRES is
   null, do not copy; instead, return the length of what the result
   would have been.  */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
  /* Only names that yytname stores double-quoted are candidates for
     quote stripping; everything else is copied (or measured) as-is.  */
  if (*yystr == '"')
    {
      YYSIZE_T yyn = 0;
      char const *yyp = yystr;

      for (;;)
        switch (*++yyp)
          {
          case '\'':
          case ',':
            /* Apostrophe or comma: keep the original quoted form.  */
            goto do_not_strip_quotes;

          case '\\':
            if (*++yyp != '\\')
              goto do_not_strip_quotes;
            /* Fall through.  */
          default:
            if (yyres)
              yyres[yyn] = *yyp;
            yyn++;
            break;

          case '"':
            /* Closing quote: terminate and report the stripped length.  */
            if (yyres)
              yyres[yyn] = '\0';
            return yyn;
          }
    do_not_strip_quotes: ;
    }

  /* Measuring mode: a null YYRES returns the would-be length only.  */
  if (! yyres)
    return yystrlen (yystr);

  return (YYSIZE_T) (yystpcpy (yyres, yystr) - yyres);
}
# endif

/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
   about the unexpected token YYTOKEN for the state stack whose top is
   YYSSP.
Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is not large enough to hold the message. In that case, also set *YYMSG_ALLOC to the required number of bytes. Return 2 if the required number of bytes is too large to store. */ static int yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, yytype_int16 *yyssp, int yytoken) { YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); YYSIZE_T yysize = yysize0; enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; /* Internationalized format string. */ const char *yyformat = YY_NULLPTR; /* Arguments of yyformat. */ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; /* Number of reported tokens (one for the "unexpected", one per "expected"). */ int yycount = 0; /* There are many possibilities here to consider: - If this state is a consistent state with a default action, then the only way this function was invoked is if the default action is an error action. In that case, don't check for expected tokens because there are none. - The only way there can be no lookahead present (in yychar) is if this state is a consistent state with a default action. Thus, detecting the absence of a lookahead is sufficient to determine that there is no unexpected or expected token to report. In that case, just report a simple "syntax error". - Don't assume there isn't a lookahead just because this state is a consistent state with a default action. There might have been a previous inconsistent state, consistent state with a non-default action, or user semantic action that manipulated yychar. - Of course, the expected token list depends on states to have correct lookahead information, and it depends on the parser not to perform extra reductions after fetching a lookahead from the scanner and before detecting a syntax error. Thus, state merging (from LALR or IELR) and default reductions corrupt the expected token list. 
However, the list is correct for canonical LR with one exception: it will still contain any token that will not be accepted due to an error action in a later state. */ if (yytoken != YYEMPTY) { int yyn = yypact[*yyssp]; yyarg[yycount++] = yytname[yytoken]; if (!yypact_value_is_default (yyn)) { /* Start YYX at -YYN if negative to avoid negative indexes in YYCHECK. In other words, skip the first -YYN actions for this state because they are default actions. */ int yyxbegin = yyn < 0 ? -yyn : 0; /* Stay within bounds of both yycheck and yytname. */ int yychecklim = YYLAST - yyn + 1; int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; int yyx; for (yyx = yyxbegin; yyx < yyxend; ++yyx) if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR && !yytable_value_is_error (yytable[yyx + yyn])) { if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { yycount = 1; yysize = yysize0; break; } yyarg[yycount++] = yytname[yyx]; { YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } } } } switch (yycount) { # define YYCASE_(N, S) \ case N: \ yyformat = S; \ break default: /* Avoid compiler warnings. */ YYCASE_(0, YY_("syntax error")); YYCASE_(1, YY_("syntax error, unexpected %s")); YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); # undef YYCASE_ } { YYSIZE_T yysize1 = yysize + yystrlen (yyformat); if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } if (*yymsg_alloc < yysize) { *yymsg_alloc = 2 * yysize; if (! (yysize <= *yymsg_alloc && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; return 1; } /* Avoid sprintf, as that infringes on the user's name space. 
Don't have undefined behavior even if the translation produced a string with the wrong number of "%s"s. */ { char *yyp = *yymsg; int yyi = 0; while ((*yyp = *yyformat) != '\0') if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) { yyp += yytnamerr (yyp, yyarg[yyi++]); yyformat += 2; } else { yyp++; yyformat++; } } return 0; } #endif /* YYERROR_VERBOSE */ /*-----------------------------------------------. | Release the memory associated to this symbol. | `-----------------------------------------------*/ static void yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, YYLTYPE *yylocationp, void *scanner, struct yang_parameter *param) { YYUSE (yyvaluep); YYUSE (yylocationp); YYUSE (scanner); YYUSE (param); if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN switch (yytype) { case 115: /* tmp_string */ { free((((*yyvaluep).p_str)) ? *((*yyvaluep).p_str) : NULL); } break; case 210: /* pattern_arg_str */ { free(((*yyvaluep).str)); } break; case 399: /* semicolom */ { free(((*yyvaluep).str)); } break; case 401: /* curly_bracket_open */ { free(((*yyvaluep).str)); } break; case 405: /* string_opt_part1 */ { free(((*yyvaluep).str)); } break; case 430: /* type_ext_alloc */ { yang_type_free(param->module->ctx, ((*yyvaluep).v)); } break; case 431: /* typedef_ext_alloc */ { yang_type_free(param->module->ctx, &((struct lys_tpdf *)((*yyvaluep).v))->type); } break; default: break; } YY_IGNORE_MAYBE_UNINITIALIZED_END } /*----------. | yyparse. | `----------*/ int yyparse (void *scanner, struct yang_parameter *param) { /* The lookahead symbol. 
*/ int yychar; char *s = NULL, *tmp_s = NULL, *ext_name = NULL; struct lys_module *trg = NULL; struct lys_node *tpdf_parent = NULL, *data_node = NULL; struct lys_ext_instance_complex *ext_instance = NULL; int is_ext_instance; void *actual = NULL; enum yytokentype backup_type, actual_type = MODULE_KEYWORD; int64_t cnt_val = 0; int is_value = 0; void *yang_type = NULL; /* The semantic value of the lookahead symbol. */ /* Default value used for initialization, for pacifying older GCCs or non-GCC compilers. */ YY_INITIAL_VALUE (static YYSTYPE yyval_default;) YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default); /* Location data for the lookahead symbol. */ static YYLTYPE yyloc_default # if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL = { 1, 1, 1, 1 } # endif ; YYLTYPE yylloc = yyloc_default; /* Number of syntax errors so far. */ int yynerrs; int yystate; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* The stacks and their tools: 'yyss': related to states. 'yyvs': related to semantic values. 'yyls': related to locations. Refer to the stacks through separate pointers, to allow yyoverflow to reallocate them elsewhere. */ /* The state stack. */ yytype_int16 yyssa[YYINITDEPTH]; yytype_int16 *yyss; yytype_int16 *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs; YYSTYPE *yyvsp; /* The location stack. */ YYLTYPE yylsa[YYINITDEPTH]; YYLTYPE *yyls; YYLTYPE *yylsp; /* The locations where the error started and ended. */ YYLTYPE yyerror_range[3]; YYSIZE_T yystacksize; int yyn; int yyresult; /* Lookahead token as an internal (translated) token number. */ int yytoken = 0; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; YYLTYPE yyloc; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. 
*/ char yymsgbuf[128]; char *yymsg = yymsgbuf; YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N), yylsp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. */ int yylen = 0; yyssp = yyss = yyssa; yyvsp = yyvs = yyvsa; yylsp = yyls = yylsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ /* User initialization code. */ { yylloc.last_column = 0; if (param->flags & EXT_INSTANCE_SUBSTMT) { is_ext_instance = 1; ext_instance = (struct lys_ext_instance_complex *)param->actual_node; ext_name = (char *)param->data_node; } else { is_ext_instance = 0; } yylloc.last_line = is_ext_instance; /* HACK for flex - return SUBMODULE_KEYWORD or SUBMODULE_EXT_KEYWORD */ param->value = &s; param->data_node = (void **)&data_node; param->actual_node = &actual; backup_type = NODE; trg = (param->submodule) ? (struct lys_module *)param->submodule : param->module; } yylsp[0] = yylloc; goto yysetstate; /*------------------------------------------------------------. | yynewstate -- Push a new state, which is found in yystate. | `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. */ yyssp++; yysetstate: *yyssp = (yytype_int16) yystate; if (yyss + yystacksize - 1 <= yyssp) { /* Get the current used size of the three stacks, in elements. */ YYSIZE_T yysize = (YYSIZE_T) (yyssp - yyss + 1); #ifdef yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ YYSTYPE *yyvs1 = yyvs; yytype_int16 *yyss1 = yyss; YYLTYPE *yyls1 = yyls; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. 
This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow (YY_("memory exhausted"), &yyss1, yysize * sizeof (*yyssp), &yyvs1, yysize * sizeof (*yyvsp), &yyls1, yysize * sizeof (*yylsp), &yystacksize); yyss = yyss1; yyvs = yyvs1; yyls = yyls1; } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE goto yyexhaustedlab; # else /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { yytype_int16 *yyss1 = yyss; union yyalloc *yyptr = (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); if (! yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); YYSTACK_RELOCATE (yyvs_alloc, yyvs); YYSTACK_RELOCATE (yyls_alloc, yyls); # undef YYSTACK_RELOCATE if (yyss1 != yyssa) YYSTACK_FREE (yyss1); } # endif #endif /* no yyoverflow */ yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; yylsp = yyls + yysize - 1; YYDPRINTF ((stderr, "Stack size increased to %lu\n", (unsigned long) yystacksize)); if (yyss + yystacksize - 1 <= yyssp) YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); if (yystate == YYFINAL) YYACCEPT; goto yybackup; /*-----------. | yybackup. | `-----------*/ yybackup: /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yypact_value_is_default (yyn)) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. 
*/ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = yylex (&yylval, &yylloc, scanner); } if (yychar <= YYEOF) { yychar = yytoken = YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yytoken = YYTRANSLATE (yychar); YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); } /* If the proper action on seeing token YYTOKEN is to reduce or to detect an error, take that action. */ yyn += yytoken; if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) goto yydefault; yyn = yytable[yyn]; if (yyn <= 0) { if (yytable_value_is_error (yyn)) goto yyerrlab; yyn = -yyn; goto yyreduce; } /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); /* Discard the shifted token. */ yychar = YYEMPTY; yystate = yyn; YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END *++yylsp = yylloc; goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- Do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. */ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: '$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison users should not rely upon it. Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. */ yyval = yyvsp[1-yylen]; /* Default location. 
*/ YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen); yyerror_range[1] = yyloc; YY_REDUCE_PRINT (yyn); switch (yyn) { case 5: { if (yyget_text(scanner)[0] == '"') { char *tmp; s = malloc(yyget_leng(scanner) - 1 + 7 * yylval.i); if (!s) { LOGMEM(trg->ctx); YYABORT; } if (!(tmp = yang_read_string(trg->ctx, yyget_text(scanner) + 1, s, yyget_leng(scanner) - 2, 0, yylloc.first_column))) { YYABORT; } s = tmp; } else { s = calloc(1, yyget_leng(scanner) - 1); if (!s) { LOGMEM(trg->ctx); YYABORT; } memcpy(s, yyget_text(scanner) + 1, yyget_leng(scanner) - 2); } (yyval.p_str) = &s; } break; case 8: { if (yyget_leng(scanner) > 2) { int length_s = strlen(s), length_tmp = yyget_leng(scanner); char *tmp; tmp = realloc(s, length_s + length_tmp - 1); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } s = tmp; if (yyget_text(scanner)[0] == '"') { if (!(tmp = yang_read_string(trg->ctx, yyget_text(scanner) + 1, s, length_tmp - 2, length_s, yylloc.first_column))) { YYABORT; } s = tmp; } else { memcpy(s + length_s, yyget_text(scanner) + 1, length_tmp - 2); s[length_s + length_tmp - 2] = '\0'; } } } break; case 10: { if (param->submodule) { free(s); LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "module"); YYABORT; } trg = param->module; yang_read_common(trg,s,MODULE_KEYWORD); s = NULL; actual_type = MODULE_KEYWORD; } break; case 12: { if (!param->module->ns) { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "namespace", "module"); YYABORT; } if (!param->module->prefix) { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "prefix", "module"); YYABORT; } } break; case 13: { (yyval.i) = 0; } break; case 14: { if (yang_check_version(param->module, param->submodule, s, (yyvsp[-1].i))) { YYABORT; } (yyval.i) = 1; s = NULL; } break; case 15: { if (yang_read_common(param->module, s, NAMESPACE_KEYWORD)) { YYABORT; } s = NULL; } break; case 16: { if (yang_read_prefix(trg, NULL, s)) { YYABORT; } s = NULL; } break; case 17: { if (!param->submodule) { free(s); LOGVAL(trg->ctx, LYE_SUBMODULE, 
LY_VLOG_NONE, NULL); YYABORT; } trg = (struct lys_module *)param->submodule; yang_read_common(trg,s,MODULE_KEYWORD); s = NULL; actual_type = SUBMODULE_KEYWORD; } break; case 19: { if (!param->submodule->prefix) { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "belongs-to", "submodule"); YYABORT; } if (!(yyvsp[0].i)) { /* check version compatibility with the main module */ if (param->module->version > 1) { LOGVAL(trg->ctx, LYE_INVER, LY_VLOG_NONE, NULL); YYABORT; } } } break; case 20: { (yyval.i) = 0; } break; case 21: { if (yang_check_version(param->module, param->submodule, s, (yyvsp[-1].i))) { YYABORT; } (yyval.i) = 1; s = NULL; } break; case 23: { backup_type = actual_type; actual_type = YANG_VERSION_KEYWORD; } break; case 25: { backup_type = actual_type; actual_type = NAMESPACE_KEYWORD; } break; case 30: { actual_type = (yyvsp[-4].token); backup_type = NODE; actual = NULL; } break; case 31: { YANG_ADDELEM(trg->imp, trg->imp_size, "imports"); /* HACK for unres */ ((struct lys_import *)actual)->module = (struct lys_module *)s; s = NULL; (yyval.token) = actual_type; actual_type = IMPORT_KEYWORD; } break; case 32: { (yyval.i) = 0; } break; case 33: { if (yang_read_prefix(trg, actual, s)) { YYABORT; } s = NULL; } break; case 34: { if (trg->version != 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "description"); free(s); YYABORT; } if (yang_read_description(trg, actual, s, "import", IMPORT_KEYWORD)) { YYABORT; } s = NULL; (yyval.i) = (yyvsp[-1].i); } break; case 35: { if (trg->version != 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "reference"); free(s); YYABORT; } if (yang_read_reference(trg, actual, s, "import", IMPORT_KEYWORD)) { YYABORT; } s = NULL; (yyval.i) = (yyvsp[-1].i); } break; case 36: { if ((yyvsp[-1].i)) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "revision-date", "import"); free(s); YYABORT; } memcpy(((struct lys_import *)actual)->rev, s, LY_REV_SIZE-1); free(s); s = NULL; (yyval.i) = 1; } break; case 37: { 
YANG_ADDELEM(trg->inc, trg->inc_size, "includes"); /* HACK for unres */ ((struct lys_include *)actual)->submodule = (struct lys_submodule *)s; s = NULL; (yyval.token) = actual_type; actual_type = INCLUDE_KEYWORD; } break; case 38: { actual_type = (yyvsp[-1].token); backup_type = NODE; actual = NULL; } break; case 41: { (yyval.i) = 0; } break; case 42: { if (trg->version != 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "description"); free(s); YYABORT; } if (yang_read_description(trg, actual, s, "include", INCLUDE_KEYWORD)) { YYABORT; } s = NULL; (yyval.i) = (yyvsp[-1].i); } break; case 43: { if (trg->version != 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "reference"); free(s); YYABORT; } if (yang_read_reference(trg, actual, s, "include", INCLUDE_KEYWORD)) { YYABORT; } s = NULL; (yyval.i) = (yyvsp[-1].i); } break; case 44: { if ((yyvsp[-1].i)) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "revision-date", "include"); free(s); YYABORT; } memcpy(((struct lys_include *)actual)->rev, s, LY_REV_SIZE-1); free(s); s = NULL; (yyval.i) = 1; } break; case 45: { backup_type = actual_type; actual_type = REVISION_DATE_KEYWORD; } break; case 47: { (yyval.token) = actual_type; if (is_ext_instance) { if (yang_read_extcomplex_str(trg, ext_instance, "belongs-to", ext_name, s, 0, LY_STMT_BELONGSTO)) { YYABORT; } } else { if (param->submodule->prefix) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "belongs-to", "submodule"); free(s); YYABORT; } if (!ly_strequal(s, param->submodule->belongsto->name, 0)) { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "belongs-to"); free(s); YYABORT; } free(s); } s = NULL; actual_type = BELONGS_TO_KEYWORD; } break; case 48: { if (is_ext_instance) { if (yang_read_extcomplex_str(trg, ext_instance, "prefix", "belongs-to", s, LY_STMT_BELONGSTO, LY_STMT_PREFIX)) { YYABORT; } } else { if (yang_read_prefix(trg, NULL, s)) { YYABORT; } } s = NULL; actual_type = (yyvsp[-4].token); } break; case 49: { backup_type = actual_type; 
actual_type = PREFIX_KEYWORD; } break; case 52: { if (yang_read_common(trg, s, ORGANIZATION_KEYWORD)) { YYABORT; } s = NULL; } break; case 53: { if (yang_read_common(trg, s, CONTACT_KEYWORD)) { YYABORT; } s = NULL; } break; case 54: { if (yang_read_description(trg, NULL, s, NULL, MODULE_KEYWORD)) { YYABORT; } s = NULL; } break; case 55: { if (yang_read_reference(trg, NULL, s, NULL, MODULE_KEYWORD)) { YYABORT; } s=NULL; } break; case 56: { backup_type = actual_type; actual_type = ORGANIZATION_KEYWORD; } break; case 58: { backup_type = actual_type; actual_type = CONTACT_KEYWORD; } break; case 60: { backup_type = actual_type; actual_type = DESCRIPTION_KEYWORD; } break; case 62: { backup_type = actual_type; actual_type = REFERENCE_KEYWORD; } break; case 64: { if (trg->rev_size) { struct lys_revision *tmp; tmp = realloc(trg->rev, trg->rev_size * sizeof *trg->rev); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } trg->rev = tmp; } } break; case 65: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!is_ext_instance) { YANG_ADDELEM(trg->rev, trg->rev_size, "revisions"); } memcpy(((struct lys_revision *)actual)->date, s, LY_REV_SIZE); free(s); s = NULL; actual_type = REVISION_KEYWORD; } break; case 67: { int i; /* check uniqueness of the revision date - not required by RFC */ for (i = 0; i < (trg->rev_size - 1); i++) { if (!strcmp(trg->rev[i].date, trg->rev[trg->rev_size - 1].date)) { LOGWRN(trg->ctx, "Module's revisions are not unique (%s).", trg->rev[trg->rev_size - 1].date); break; } } } break; case 68: { actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; } break; case 72: { if (yang_read_description(trg, actual, s, "revision",REVISION_KEYWORD)) { YYABORT; } s = NULL; } break; case 73: { if (yang_read_reference(trg, actual, s, "revision", REVISION_KEYWORD)) { YYABORT; } s = NULL; } break; case 74: { s = strdup(yyget_text(scanner)); if (!s) { LOGMEM(trg->ctx); YYABORT; } if (lyp_check_date(trg->ctx, s)) { 
free(s); YYABORT; } } break; case 76: { if (lyp_check_date(trg->ctx, s)) { free(s); YYABORT; } } break; case 77: { void *tmp; if (trg->tpdf_size) { tmp = realloc(trg->tpdf, trg->tpdf_size * sizeof *trg->tpdf); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } trg->tpdf = tmp; } if (trg->features_size) { tmp = realloc(trg->features, trg->features_size * sizeof *trg->features); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } trg->features = tmp; } if (trg->ident_size) { tmp = realloc(trg->ident, trg->ident_size * sizeof *trg->ident); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } trg->ident = tmp; } if (trg->augment_size) { tmp = realloc(trg->augment, trg->augment_size * sizeof *trg->augment); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } trg->augment = tmp; } if (trg->extensions_size) { tmp = realloc(trg->extensions, trg->extensions_size * sizeof *trg->extensions); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } trg->extensions = tmp; } } break; case 78: { /* check the module with respect to the context now */ if (!param->submodule) { switch (lyp_ctx_check_module(trg)) { case -1: YYABORT; case 0: break; case 1: /* it's already there */ param->flags |= YANG_EXIST_MODULE; YYABORT; } } param->flags &= (~YANG_REMOVE_IMPORT); if (yang_check_imports(trg, param->unres)) { YYABORT; } actual = NULL; } break; case 79: { actual = NULL; } break; case 90: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; YANG_ADDELEM(trg->extensions, trg->extensions_size, "extensions"); trg->extensions_size--; ((struct lys_ext *)actual)->name = lydict_insert_zc(param->module->ctx, s); ((struct lys_ext *)actual)->module = trg; if (lyp_check_identifier(trg->ctx, ((struct lys_ext *)actual)->name, LY_IDENT_EXTENSION, trg, NULL)) { trg->extensions_size++; YYABORT; } trg->extensions_size++; s = NULL; actual_type = EXTENSION_KEYWORD; } break; case 91: { struct lys_ext *ext = actual; ext->plugin = ext_get_plugin(ext->name, ext->module->name, ext->module->rev ? 
ext->module->rev[0].date : NULL); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; } break; case 96: { if (((struct lys_ext *)actual)->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "status", "extension"); YYABORT; } ((struct lys_ext *)actual)->flags |= (yyvsp[0].i); } break; case 97: { if (yang_read_description(trg, actual, s, "extension", NODE)) { YYABORT; } s = NULL; } break; case 98: { if (yang_read_reference(trg, actual, s, "extension", NODE)) { YYABORT; } s = NULL; } break; case 99: { (yyval.token) = actual_type; if (is_ext_instance) { if (yang_read_extcomplex_str(trg, ext_instance, "argument", ext_name, s, 0, LY_STMT_ARGUMENT)) { YYABORT; } } else { if (((struct lys_ext *)actual)->argument) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "argument", "extension"); free(s); YYABORT; } ((struct lys_ext *)actual)->argument = lydict_insert_zc(param->module->ctx, s); } s = NULL; actual_type = ARGUMENT_KEYWORD; } break; case 100: { actual_type = (yyvsp[-1].token); } break; case 103: { (yyval.uint) = (yyvsp[0].uint); backup_type = actual_type; actual_type = YIN_ELEMENT_KEYWORD; } break; case 105: { if (is_ext_instance) { int c; const char ***p; uint8_t *val; struct lyext_substmt *info; c = 0; p = lys_ext_complex_get_substmt(LY_STMT_ARGUMENT, ext_instance, &info); if (info->cardinality >= LY_STMT_CARD_SOME) { /* get the index in the array to add new item */ for (c = 0; p[0][c + 1]; c++); val = (uint8_t *)p[1]; } else { val = (uint8_t *)(p + 1); } val[c] = ((yyvsp[-1].uint) == LYS_YINELEM) ? 
1 : 2; } else { ((struct lys_ext *)actual)->flags |= (yyvsp[-1].uint); } } break; case 106: { (yyval.uint) = LYS_YINELEM; } break; case 107: { (yyval.uint) = 0; } break; case 108: { if (!strcmp(s, "true")) { (yyval.uint) = LYS_YINELEM; } else if (!strcmp(s, "false")) { (yyval.uint) = 0; } else { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, s); free(s); YYABORT; } free(s); s = NULL; } break; case 109: { (yyval.i) = (yyvsp[0].i); backup_type = actual_type; actual_type = STATUS_KEYWORD; } break; case 110: { (yyval.i) = (yyvsp[-1].i); } break; case 111: { (yyval.i) = LYS_STATUS_CURR; } break; case 112: { (yyval.i) = LYS_STATUS_OBSLT; } break; case 113: { (yyval.i) = LYS_STATUS_DEPRC; } break; case 114: { if (!strcmp(s, "current")) { (yyval.i) = LYS_STATUS_CURR; } else if (!strcmp(s, "obsolete")) { (yyval.i) = LYS_STATUS_OBSLT; } else if (!strcmp(s, "deprecated")) { (yyval.i) = LYS_STATUS_DEPRC; } else { LOGVAL(trg->ctx,LYE_INSTMT, LY_VLOG_NONE, NULL, s); free(s); YYABORT; } free(s); s = NULL; } break; case 115: { /* check uniqueness of feature's names */ if (lyp_check_identifier(trg->ctx, s, LY_IDENT_FEATURE, trg, NULL)) { free(s); YYABORT; } (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; YANG_ADDELEM(trg->features, trg->features_size, "features"); ((struct lys_feature *)actual)->name = lydict_insert_zc(trg->ctx, s); ((struct lys_feature *)actual)->module = trg; s = NULL; actual_type = FEATURE_KEYWORD; } break; case 116: { actual = (yyvsp[-1].backup_token).actual; actual_type = (yyvsp[-1].backup_token).token; } break; case 118: { struct lys_iffeature *tmp; if (((struct lys_feature *)actual)->iffeature_size) { tmp = realloc(((struct lys_feature *)actual)->iffeature, ((struct lys_feature *)actual)->iffeature_size * sizeof *tmp); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct lys_feature *)actual)->iffeature = tmp; } } break; case 121: { if (((struct lys_feature *)actual)->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, 
LY_VLOG_NONE, NULL, "status", "feature"); YYABORT; } ((struct lys_feature *)actual)->flags |= (yyvsp[0].i); } break; case 122: { if (yang_read_description(trg, actual, s, "feature", NODE)) { YYABORT; } s = NULL; } break; case 123: { if (yang_read_reference(trg, actual, s, "feature", NODE)) { YYABORT; } s = NULL; } break; case 124: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; switch (actual_type) { case FEATURE_KEYWORD: YANG_ADDELEM(((struct lys_feature *)actual)->iffeature, ((struct lys_feature *)actual)->iffeature_size, "if-features"); break; case IDENTITY_KEYWORD: if (trg->version < 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "if-feature", "identity"); free(s); YYABORT; } YANG_ADDELEM(((struct lys_ident *)actual)->iffeature, ((struct lys_ident *)actual)->iffeature_size, "if-features"); break; case ENUM_KEYWORD: if (trg->version < 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "if-feature"); free(s); YYABORT; } YANG_ADDELEM(((struct lys_type_enum *)actual)->iffeature, ((struct lys_type_enum *)actual)->iffeature_size, "if-features"); break; case BIT_KEYWORD: if (trg->version < 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "if-feature", "bit"); free(s); YYABORT; } YANG_ADDELEM(((struct lys_type_bit *)actual)->iffeature, ((struct lys_type_bit *)actual)->iffeature_size, "if-features"); break; case REFINE_KEYWORD: if (trg->version < 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "if-feature"); free(s); YYABORT; } YANG_ADDELEM(((struct lys_refine *)actual)->iffeature, ((struct lys_refine *)actual)->iffeature_size, "if-features"); break; case EXTENSION_INSTANCE: /* nothing change */ break; default: /* lys_node_* */ YANG_ADDELEM(((struct lys_node *)actual)->iffeature, ((struct lys_node *)actual)->iffeature_size, "if-features"); break; } ((struct lys_iffeature *)actual)->features = (struct lys_feature **)s; s = NULL; actual_type = IF_FEATURE_KEYWORD; } break; case 125: { actual = 
(yyvsp[-1].backup_token).actual; actual_type = (yyvsp[-1].backup_token).token; } break; case 128: { const char *tmp; tmp = lydict_insert_zc(trg->ctx, s); s = NULL; if (dup_identities_check(tmp, trg)) { lydict_remove(trg->ctx, tmp); YYABORT; } (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; YANG_ADDELEM(trg->ident, trg->ident_size, "identities"); ((struct lys_ident *)actual)->name = tmp; ((struct lys_ident *)actual)->module = trg; actual_type = IDENTITY_KEYWORD; } break; case 129: { actual = (yyvsp[-1].backup_token).actual; actual_type = (yyvsp[-1].backup_token).token; } break; case 131: { void *tmp; if (((struct lys_ident *)actual)->base_size) { tmp = realloc(((struct lys_ident *)actual)->base, ((struct lys_ident *)actual)->base_size * sizeof *((struct lys_ident *)actual)->base); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct lys_ident *)actual)->base = tmp; } if (((struct lys_ident *)actual)->iffeature_size) { tmp = realloc(((struct lys_ident *)actual)->iffeature, ((struct lys_ident *)actual)->iffeature_size * sizeof *((struct lys_ident *)actual)->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct lys_ident *)actual)->iffeature = tmp; } } break; case 133: { void *identity; if ((trg->version < 2) && ((struct lys_ident *)actual)->base_size) { free(s); LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "base", "identity"); YYABORT; } identity = actual; YANG_ADDELEM(((struct lys_ident *)actual)->base, ((struct lys_ident *)actual)->base_size, "bases"); *((struct lys_ident **)actual) = (struct lys_ident *)s; s = NULL; actual = identity; } break; case 135: { if (((struct lys_ident *)actual)->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "status", "identity"); YYABORT; } ((struct lys_ident *)actual)->flags |= (yyvsp[0].i); } break; case 136: { if (yang_read_description(trg, actual, s, "identity", NODE)) { YYABORT; } s = NULL; } break; case 137: { if (yang_read_reference(trg, actual, s, "identity", 
NODE)) { YYABORT; } s = NULL; } break; case 138: { backup_type = actual_type; actual_type = BASE_KEYWORD; } break; case 140: { tpdf_parent = (actual_type == EXTENSION_INSTANCE) ? ext_instance : actual; (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (lyp_check_identifier(trg->ctx, s, LY_IDENT_TYPE, trg, tpdf_parent)) { free(s); YYABORT; } switch (actual_type) { case MODULE_KEYWORD: case SUBMODULE_KEYWORD: YANG_ADDELEM(trg->tpdf, trg->tpdf_size, "typedefs"); break; case GROUPING_KEYWORD: YANG_ADDELEM(((struct lys_node_grp *)tpdf_parent)->tpdf, ((struct lys_node_grp *)tpdf_parent)->tpdf_size, "typedefs"); break; case CONTAINER_KEYWORD: YANG_ADDELEM(((struct lys_node_container *)tpdf_parent)->tpdf, ((struct lys_node_container *)tpdf_parent)->tpdf_size, "typedefs"); break; case LIST_KEYWORD: YANG_ADDELEM(((struct lys_node_list *)tpdf_parent)->tpdf, ((struct lys_node_list *)tpdf_parent)->tpdf_size, "typedefs"); break; case RPC_KEYWORD: case ACTION_KEYWORD: YANG_ADDELEM(((struct lys_node_rpc_action *)tpdf_parent)->tpdf, ((struct lys_node_rpc_action *)tpdf_parent)->tpdf_size, "typedefs"); break; case INPUT_KEYWORD: case OUTPUT_KEYWORD: YANG_ADDELEM(((struct lys_node_inout *)tpdf_parent)->tpdf, ((struct lys_node_inout *)tpdf_parent)->tpdf_size, "typedefs"); break; case NOTIFICATION_KEYWORD: YANG_ADDELEM(((struct lys_node_notif *)tpdf_parent)->tpdf, ((struct lys_node_notif *)tpdf_parent)->tpdf_size, "typedefs"); break; case EXTENSION_INSTANCE: /* typedef is already allocated */ break; default: /* another type of nodetype is error*/ LOGINT(trg->ctx); free(s); YYABORT; } ((struct lys_tpdf *)actual)->name = lydict_insert_zc(param->module->ctx, s); ((struct lys_tpdf *)actual)->module = trg; s = NULL; actual_type = TYPEDEF_KEYWORD; } break; case 141: { if (!((yyvsp[-1].nodes).node.flag & LYS_TYPE_DEF)) { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "type", "typedef"); YYABORT; } actual_type = (yyvsp[-4].backup_token).token; actual = 
(yyvsp[-4].backup_token).actual; } break; case 142: { (yyval.nodes).node.ptr_tpdf = actual; (yyval.nodes).node.flag = 0; } break; case 143: { (yyvsp[-2].nodes).node.flag |= LYS_TYPE_DEF; (yyval.nodes) = (yyvsp[-2].nodes); } break; case 144: { if (yang_read_units(trg, (yyvsp[-1].nodes).node.ptr_tpdf, s, TYPEDEF_KEYWORD)) { YYABORT; } s = NULL; } break; case 145: { if (yang_read_default(trg, (yyvsp[-1].nodes).node.ptr_tpdf, s, TYPEDEF_KEYWORD)) { YYABORT; } s = NULL; } break; case 146: { if ((yyvsp[-1].nodes).node.ptr_tpdf->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "status", "typedef"); YYABORT; } (yyvsp[-1].nodes).node.ptr_tpdf->flags |= (yyvsp[0].i); } break; case 147: { if (yang_read_description(trg, (yyvsp[-1].nodes).node.ptr_tpdf, s, "typedef", NODE)) { YYABORT; } s = NULL; } break; case 148: { if (yang_read_reference(trg, (yyvsp[-1].nodes).node.ptr_tpdf, s, "typedef", NODE)) { YYABORT; } s = NULL; } break; case 149: { actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; } break; case 150: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_type(trg->ctx, actual, s, actual_type))) { YYABORT; } s = NULL; actual_type = TYPE_KEYWORD; } break; case 153: { if (((struct yang_type *)actual)->base == LY_TYPE_STRING && ((struct yang_type *)actual)->type->info.str.pat_count) { void *tmp; tmp = realloc(((struct yang_type *)actual)->type->info.str.patterns, ((struct yang_type *)actual)->type->info.str.pat_count * sizeof *((struct yang_type *)actual)->type->info.str.patterns); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct yang_type *)actual)->type->info.str.patterns = tmp; #ifdef LY_ENABLED_CACHE if (!(trg->ctx->models.flags & LY_CTX_TRUSTED) && ((struct yang_type *)actual)->type->info.str.patterns_pcre) { tmp = realloc(((struct yang_type *)actual)->type->info.str.patterns_pcre, 2 * ((struct yang_type *)actual)->type->info.str.pat_count * sizeof 
*((struct yang_type *)actual)->type->info.str.patterns_pcre); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct yang_type *)actual)->type->info.str.patterns_pcre = tmp; } #endif } if (((struct yang_type *)actual)->base == LY_TYPE_UNION) { struct lys_type *tmp; tmp = realloc(((struct yang_type *)actual)->type->info.uni.types, ((struct yang_type *)actual)->type->info.uni.count * sizeof *tmp); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct yang_type *)actual)->type->info.uni.types = tmp; } if (((struct yang_type *)actual)->base == LY_TYPE_IDENT) { struct lys_ident **tmp; tmp = realloc(((struct yang_type *)actual)->type->info.ident.ref, ((struct yang_type *)actual)->type->info.ident.count* sizeof *tmp); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct yang_type *)actual)->type->info.ident.ref = tmp; } } break; case 157: { if (yang_read_require_instance(trg->ctx, actual, (yyvsp[0].i))) { YYABORT; } } break; case 158: { /* leafref_specification */ if (yang_read_leafref_path(trg, actual, s)) { YYABORT; } s = NULL; } break; case 159: { /* identityref_specification */ if (((struct yang_type *)actual)->base && ((struct yang_type *)actual)->base != LY_TYPE_IDENT) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "base"); return EXIT_FAILURE; } ((struct yang_type *)actual)->base = LY_TYPE_IDENT; yang_type = actual; YANG_ADDELEM(((struct yang_type *)actual)->type->info.ident.ref, ((struct yang_type *)actual)->type->info.ident.count, "identity refs"); *((struct lys_ident **)actual) = (struct lys_ident *)s; actual = yang_type; s = NULL; } break; case 162: { if (yang_read_fraction(trg->ctx, actual, (yyvsp[0].uint))) { YYABORT; } } break; case 165: { actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; } break; case 166: { struct yang_type *stype = (struct yang_type *)actual; (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (stype->base != 0 && stype->base != LY_TYPE_UNION) { LOGVAL(trg->ctx, LYE_SPEC, 
LY_VLOG_NONE, NULL, "Unexpected type statement."); YYABORT; } stype->base = LY_TYPE_UNION; if (strcmp(stype->name, "union")) { /* type can be a substatement only in "union" type, not in derived types */ LOGVAL(trg->ctx, LYE_INCHILDSTMT, LY_VLOG_NONE, NULL, "type", "derived type"); YYABORT; } YANG_ADDELEM(stype->type->info.uni.types, stype->type->info.uni.count, "union types") actual_type = UNION_KEYWORD; } break; case 167: { (yyval.uint) = (yyvsp[0].uint); backup_type = actual_type; actual_type = FRACTION_DIGITS_KEYWORD; } break; case 168: { (yyval.uint) = (yyvsp[-1].uint); } break; case 169: { (yyval.uint) = (yyvsp[-1].uint); } break; case 170: { char *endptr = NULL; unsigned long val; errno = 0; val = strtoul(s, &endptr, 10); if (*endptr || s[0] == '-' || errno || val == 0 || val > UINT32_MAX) { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "fraction-digits"); free(s); s = NULL; YYABORT; } (yyval.uint) = (uint32_t) val; free(s); s =NULL; } break; case 171: { actual = (yyvsp[-1].backup_token).actual; actual_type = (yyvsp[-1].backup_token).token; } break; case 172: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_length(trg->ctx, actual, s, is_ext_instance))) { YYABORT; } actual_type = LENGTH_KEYWORD; s = NULL; } break; case 175: { switch (actual_type) { case MUST_KEYWORD: (yyval.str) = "must"; break; case LENGTH_KEYWORD: (yyval.str) = "length"; break; case RANGE_KEYWORD: (yyval.str) = "range"; break; default: LOGINT(trg->ctx); YYABORT; break; } } break; case 176: { if (yang_read_message(trg, actual, s, (yyvsp[-1].str), ERROR_MESSAGE_KEYWORD)) { YYABORT; } s = NULL; } break; case 177: { if (yang_read_message(trg, actual, s, (yyvsp[-1].str), ERROR_APP_TAG_KEYWORD)) { YYABORT; } s = NULL; } break; case 178: { if (yang_read_description(trg, actual, s, (yyvsp[-1].str), NODE)) { YYABORT; } s = NULL; } break; case 179: { if (yang_read_reference(trg, actual, s, (yyvsp[-1].str), NODE)) { YYABORT; } s = NULL; } 
break; case 180: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; } break; case 181: {struct lys_restr *pattern = actual; actual = NULL; #ifdef LY_ENABLED_CACHE if ((yyvsp[-2].backup_token).token != EXTENSION_INSTANCE && !(data_node && data_node->nodetype != LYS_GROUPING && lys_ingrouping(data_node))) { unsigned int c = 2 * (((struct yang_type *)(yyvsp[-2].backup_token).actual)->type->info.str.pat_count - 1); YANG_ADDELEM(((struct yang_type *)(yyvsp[-2].backup_token).actual)->type->info.str.patterns_pcre, c, "patterns"); ++c; YANG_ADDELEM(((struct yang_type *)(yyvsp[-2].backup_token).actual)->type->info.str.patterns_pcre, c, "patterns"); actual = &(((struct yang_type *)(yyvsp[-2].backup_token).actual)->type->info.str.patterns_pcre)[2 * (((struct yang_type *)(yyvsp[-2].backup_token).actual)->type->info.str.pat_count - 1)]; } #endif if (yang_read_pattern(trg->ctx, pattern, actual, (yyvsp[-1].str), (yyvsp[0].ch))) { YYABORT; } actual_type = (yyvsp[-2].backup_token).token; actual = (yyvsp[-2].backup_token).actual; } break; case 182: { if (actual_type != EXTENSION_INSTANCE) { if (((struct yang_type *)actual)->base != 0 && ((struct yang_type *)actual)->base != LY_TYPE_STRING) { free(s); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Unexpected pattern statement."); YYABORT; } ((struct yang_type *)actual)->base = LY_TYPE_STRING; YANG_ADDELEM(((struct yang_type *)actual)->type->info.str.patterns, ((struct yang_type *)actual)->type->info.str.pat_count, "patterns"); } (yyval.str) = s; s = NULL; actual_type = PATTERN_KEYWORD; } break; case 183: { (yyval.ch) = 0x06; } break; case 184: { (yyval.ch) = (yyvsp[-1].ch); } break; case 185: { (yyval.ch) = 0x06; /* ACK */ } break; case 186: { if (trg->version < 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, "modifier"); YYABORT; } if ((yyvsp[-1].ch) != 0x06) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "modifier", "pattern"); YYABORT; } (yyval.ch) = (yyvsp[0].ch); } break; case 187: { if 
(yang_read_message(trg, actual, s, "pattern", ERROR_MESSAGE_KEYWORD)) { YYABORT; } s = NULL; } break; case 188: { if (yang_read_message(trg, actual, s, "pattern", ERROR_APP_TAG_KEYWORD)) { YYABORT; } s = NULL; } break; case 189: { if (yang_read_description(trg, actual, s, "pattern", NODE)) { YYABORT; } s = NULL; } break; case 190: { if (yang_read_reference(trg, actual, s, "pattern", NODE)) { YYABORT; } s = NULL; } break; case 191: { backup_type = actual_type; actual_type = MODIFIER_KEYWORD; } break; case 192: { if (!strcmp(s, "invert-match")) { (yyval.ch) = 0x15; free(s); s = NULL; } else { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, s); free(s); YYABORT; } } break; case 193: { struct lys_type_enum * tmp; cnt_val = 0; tmp = realloc(((struct yang_type *)actual)->type->info.enums.enm, ((struct yang_type *)actual)->type->info.enums.count * sizeof *tmp); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct yang_type *)actual)->type->info.enums.enm = tmp; } break; case 196: { if (yang_check_enum(trg->ctx, yang_type, actual, &cnt_val, is_value)) { YYABORT; } actual = (yyvsp[-1].backup_token).actual; actual_type = (yyvsp[-1].backup_token).token; } break; case 197: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = yang_type = actual; YANG_ADDELEM(((struct yang_type *)actual)->type->info.enums.enm, ((struct yang_type *)actual)->type->info.enums.count, "enums"); if (yang_read_enum(trg->ctx, yang_type, actual, s)) { YYABORT; } s = NULL; is_value = 0; actual_type = ENUM_KEYWORD; } break; case 199: { if (((struct lys_type_enum *)actual)->iffeature_size) { struct lys_iffeature *tmp; tmp = realloc(((struct lys_type_enum *)actual)->iffeature, ((struct lys_type_enum *)actual)->iffeature_size * sizeof *tmp); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct lys_type_enum *)actual)->iffeature = tmp; } } break; case 202: { if (is_value) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "value", "enum"); YYABORT; } ((struct lys_type_enum *)actual)->value = 
(yyvsp[0].i); /* keep the highest enum value for automatic increment */ if ((yyvsp[0].i) >= cnt_val) { cnt_val = (yyvsp[0].i) + 1; } is_value = 1; } break; case 203: { if (((struct lys_type_enum *)actual)->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "status", "enum"); YYABORT; } ((struct lys_type_enum *)actual)->flags |= (yyvsp[0].i); } break; case 204: { if (yang_read_description(trg, actual, s, "enum", NODE)) { YYABORT; } s = NULL; } break; case 205: { if (yang_read_reference(trg, actual, s, "enum", NODE)) { YYABORT; } s = NULL; } break; case 206: { (yyval.i) = (yyvsp[0].i); backup_type = actual_type; actual_type = VALUE_KEYWORD; } break; case 207: { (yyval.i) = (yyvsp[-1].i); } break; case 208: { (yyval.i) = (yyvsp[-1].i); } break; case 209: { /* convert it to int32_t */ int64_t val; char *endptr; val = strtoll(s, &endptr, 10); if (val < INT32_MIN || val > INT32_MAX || *endptr) { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "value"); free(s); YYABORT; } free(s); s = NULL; (yyval.i) = (int32_t) val; } break; case 210: { actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; } break; case 213: { backup_type = actual_type; actual_type = PATH_KEYWORD; } break; case 215: { (yyval.i) = (yyvsp[0].i); backup_type = actual_type; actual_type = REQUIRE_INSTANCE_KEYWORD; } break; case 216: { (yyval.i) = (yyvsp[-1].i); } break; case 217: { (yyval.i) = 1; } break; case 218: { (yyval.i) = -1; } break; case 219: { if (!strcmp(s,"true")) { (yyval.i) = 1; } else if (!strcmp(s,"false")) { (yyval.i) = -1; } else { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "require-instance"); free(s); YYABORT; } free(s); s = NULL; } break; case 220: { struct lys_type_bit * tmp; cnt_val = 0; tmp = realloc(((struct yang_type *)actual)->type->info.bits.bit, ((struct yang_type *)actual)->type->info.bits.count * sizeof *tmp); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct yang_type *)actual)->type->info.bits.bit = tmp; } 
break; case 223: { if (yang_check_bit(trg->ctx, yang_type, actual, &cnt_val, is_value)) { YYABORT; } actual = (yyvsp[-2].backup_token).actual; actual_type = (yyvsp[-2].backup_token).token; } break; case 224: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = yang_type = actual; YANG_ADDELEM(((struct yang_type *)actual)->type->info.bits.bit, ((struct yang_type *)actual)->type->info.bits.count, "bits"); if (yang_read_bit(trg->ctx, yang_type, actual, s)) { YYABORT; } s = NULL; is_value = 0; actual_type = BIT_KEYWORD; } break; case 226: { if (((struct lys_type_bit *)actual)->iffeature_size) { struct lys_iffeature *tmp; tmp = realloc(((struct lys_type_bit *)actual)->iffeature, ((struct lys_type_bit *)actual)->iffeature_size * sizeof *tmp); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } ((struct lys_type_bit *)actual)->iffeature = tmp; } } break; case 229: { if (is_value) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "position", "bit"); YYABORT; } ((struct lys_type_bit *)actual)->pos = (yyvsp[0].uint); /* keep the highest position value for automatic increment */ if ((yyvsp[0].uint) >= cnt_val) { cnt_val = (yyvsp[0].uint) + 1; } is_value = 1; } break; case 230: { if (((struct lys_type_bit *)actual)->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "status", "bit"); YYABORT; } ((struct lys_type_bit *)actual)->flags |= (yyvsp[0].i); } break; case 231: { if (yang_read_description(trg, actual, s, "bit", NODE)) { YYABORT; } s = NULL; } break; case 232: { if (yang_read_reference(trg, actual, s, "bit", NODE)) { YYABORT; } s = NULL; } break; case 233: { (yyval.uint) = (yyvsp[0].uint); backup_type = actual_type; actual_type = POSITION_KEYWORD; } break; case 234: { (yyval.uint) = (yyvsp[-1].uint); } break; case 235: { (yyval.uint) = (yyvsp[-1].uint); } break; case 236: { /* convert it to uint32_t */ unsigned long val; char *endptr = NULL; errno = 0; val = strtoul(s, &endptr, 10); if (s[0] == '-' || *endptr || errno || val > 
UINT32_MAX) { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "position"); free(s); YYABORT; } free(s); s = NULL; (yyval.uint) = (uint32_t) val; } break; case 237: { backup_type = actual_type; actual_type = ERROR_MESSAGE_KEYWORD; } break; case 239: { backup_type = actual_type; actual_type = ERROR_APP_TAG_KEYWORD; } break; case 241: { backup_type = actual_type; actual_type = UNITS_KEYWORD; } break; case 243: { backup_type = actual_type; actual_type = DEFAULT_KEYWORD; } break; case 245: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_GROUPING, sizeof(struct lys_node_grp)))) { YYABORT; } s = NULL; data_node = actual; actual_type = GROUPING_KEYWORD; } break; case 246: { LOGDBG(LY_LDGYANG, "finished parsing grouping statement \"%s\"", data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = (yyvsp[-1].backup_token).actual; } break; case 249: { (yyval.nodes).grouping = actual; } break; case 250: { if ((yyvsp[-1].nodes).grouping->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).grouping, "status", "grouping"); YYABORT; } (yyvsp[-1].nodes).grouping->flags |= (yyvsp[0].i); } break; case 251: { if (yang_read_description(trg, (yyvsp[-1].nodes).grouping, s, "grouping", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 252: { if (yang_read_reference(trg, (yyvsp[-1].nodes).grouping, s, "grouping", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 257: { if (trg->version < 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_LYS, (yyvsp[-2].nodes).grouping, "notification"); YYABORT; } } break; case 266: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_CONTAINER, sizeof(struct lys_node_container)))) { YYABORT; } data_node = actual; s = NULL; actual_type = CONTAINER_KEYWORD; } break; case 267: { 
LOGDBG(LY_LDGYANG, "finished parsing container statement \"%s\"", data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = (yyvsp[-1].backup_token).actual; } break; case 269: { void *tmp; if ((yyvsp[-1].nodes).container->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).container->iffeature, (yyvsp[-1].nodes).container->iffeature_size * sizeof *(yyvsp[-1].nodes).container->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).container->iffeature = tmp; } if ((yyvsp[-1].nodes).container->must_size) { tmp = realloc((yyvsp[-1].nodes).container->must, (yyvsp[-1].nodes).container->must_size * sizeof *(yyvsp[-1].nodes).container->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).container->must = tmp; } } break; case 270: { (yyval.nodes).container = actual; } break; case 274: { if (yang_read_presence(trg, (yyvsp[-1].nodes).container, s)) { YYABORT; } s = NULL; } break; case 275: { if ((yyvsp[-1].nodes).container->flags & LYS_CONFIG_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).container, "config", "container"); YYABORT; } (yyvsp[-1].nodes).container->flags |= (yyvsp[0].i); } break; case 276: { if ((yyvsp[-1].nodes).container->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).container, "status", "container"); YYABORT; } (yyvsp[-1].nodes).container->flags |= (yyvsp[0].i); } break; case 277: { if (yang_read_description(trg, (yyvsp[-1].nodes).container, s, "container", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 278: { if (yang_read_reference(trg, (yyvsp[-1].nodes).container, s, "container", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 281: { if (trg->version < 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_LYS, (yyvsp[-2].nodes).container, "notification"); YYABORT; } } break; case 284: { void *tmp; if (!((yyvsp[-1].nodes).node.flag & LYS_TYPE_DEF)) { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_LYS, 
(yyvsp[-1].nodes).node.ptr_leaf, "type", "leaf"); YYABORT; } if ((yyvsp[-1].nodes).node.ptr_leaf->dflt && ((yyvsp[-1].nodes).node.ptr_leaf->flags & LYS_MAND_TRUE)) { /* RFC 6020, 7.6.4 - default statement must not with mandatory true */ LOGVAL(trg->ctx, LYE_INCHILDSTMT, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaf, "mandatory", "leaf"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaf, "The \"mandatory\" statement is forbidden on leaf with \"default\"."); YYABORT; } if ((yyvsp[-1].nodes).node.ptr_leaf->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_leaf->iffeature, (yyvsp[-1].nodes).node.ptr_leaf->iffeature_size * sizeof *(yyvsp[-1].nodes).node.ptr_leaf->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaf->iffeature = tmp; } if ((yyvsp[-1].nodes).node.ptr_leaf->must_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_leaf->must, (yyvsp[-1].nodes).node.ptr_leaf->must_size * sizeof *(yyvsp[-1].nodes).node.ptr_leaf->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaf->must = tmp; } LOGDBG(LY_LDGYANG, "finished parsing leaf statement \"%s\"", data_node->name); actual_type = (yyvsp[-4].backup_token).token; actual = (yyvsp[-4].backup_token).actual; data_node = (yyvsp[-4].backup_token).actual; } break; case 285: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_LEAF, sizeof(struct lys_node_leaf)))) { YYABORT; } data_node = actual; s = NULL; actual_type = LEAF_KEYWORD; } break; case 286: { (yyval.nodes).node.ptr_leaf = actual; (yyval.nodes).node.flag = 0; } break; case 289: { (yyvsp[-2].nodes).node.flag |= LYS_TYPE_DEF; (yyval.nodes) = (yyvsp[-2].nodes); } break; case 290: { if (yang_read_units(trg, (yyvsp[-1].nodes).node.ptr_leaf, s, LEAF_KEYWORD)) { YYABORT; } s = NULL; } break; case 292: { if (yang_read_default(trg, (yyvsp[-1].nodes).node.ptr_leaf, s, LEAF_KEYWORD)) { YYABORT; } s = 
NULL; } break; case 293: { if ((yyvsp[-1].nodes).node.ptr_leaf->flags & LYS_CONFIG_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaf, "config", "leaf"); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaf->flags |= (yyvsp[0].i); } break; case 294: { if ((yyvsp[-1].nodes).node.ptr_leaf->flags & LYS_MAND_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaf, "mandatory", "leaf"); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaf->flags |= (yyvsp[0].i); } break; case 295: { if ((yyvsp[-1].nodes).node.ptr_leaf->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaf, "status", "leaf"); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaf->flags |= (yyvsp[0].i); } break; case 296: { if (yang_read_description(trg, (yyvsp[-1].nodes).node.ptr_leaf, s, "leaf", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 297: { if (yang_read_reference(trg, (yyvsp[-1].nodes).node.ptr_leaf, s, "leaf", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 298: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_LEAFLIST, sizeof(struct lys_node_leaflist)))) { YYABORT; } data_node = actual; s = NULL; actual_type = LEAF_LIST_KEYWORD; } break; case 299: { void *tmp; if ((yyvsp[-1].nodes).node.ptr_leaflist->flags & LYS_CONFIG_R) { /* RFC 6020, 7.7.5 - ignore ordering when the list represents state data * ignore oredering MASK - 0x7F */ (yyvsp[-1].nodes).node.ptr_leaflist->flags &= 0x7F; } if (!((yyvsp[-1].nodes).node.flag & LYS_TYPE_DEF)) { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "type", "leaf-list"); YYABORT; } if ((yyvsp[-1].nodes).node.ptr_leaflist->dflt_size && (yyvsp[-1].nodes).node.ptr_leaflist->min) { LOGVAL(trg->ctx, LYE_INCHILDSTMT, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "min-elements", "leaf-list"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, 
(yyvsp[-1].nodes).node.ptr_leaflist, "The \"min-elements\" statement with non-zero value is forbidden on leaf-lists with the \"default\" statement."); YYABORT; } if ((yyvsp[-1].nodes).node.ptr_leaflist->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_leaflist->iffeature, (yyvsp[-1].nodes).node.ptr_leaflist->iffeature_size * sizeof *(yyvsp[-1].nodes).node.ptr_leaflist->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaflist->iffeature = tmp; } if ((yyvsp[-1].nodes).node.ptr_leaflist->must_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_leaflist->must, (yyvsp[-1].nodes).node.ptr_leaflist->must_size * sizeof *(yyvsp[-1].nodes).node.ptr_leaflist->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaflist->must = tmp; } if ((yyvsp[-1].nodes).node.ptr_leaflist->dflt_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_leaflist->dflt, (yyvsp[-1].nodes).node.ptr_leaflist->dflt_size * sizeof *(yyvsp[-1].nodes).node.ptr_leaflist->dflt); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaflist->dflt = tmp; } LOGDBG(LY_LDGYANG, "finished parsing leaf-list statement \"%s\"", data_node->name); actual_type = (yyvsp[-4].backup_token).token; actual = (yyvsp[-4].backup_token).actual; data_node = (yyvsp[-4].backup_token).actual; } break; case 300: { (yyval.nodes).node.ptr_leaflist = actual; (yyval.nodes).node.flag = 0; } break; case 303: { (yyvsp[-2].nodes).node.flag |= LYS_TYPE_DEF; (yyval.nodes) = (yyvsp[-2].nodes); } break; case 304: { if (trg->version < 2) { free(s); LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "default"); YYABORT; } YANG_ADDELEM((yyvsp[-1].nodes).node.ptr_leaflist->dflt, (yyvsp[-1].nodes).node.ptr_leaflist->dflt_size, "defaults"); (*(const char **)actual) = lydict_insert_zc(param->module->ctx, s); s = NULL; actual = (yyvsp[-1].nodes).node.ptr_leaflist; } break; case 305: { if (yang_read_units(trg, (yyvsp[-1].nodes).node.ptr_leaflist, s, 
LEAF_LIST_KEYWORD)) { YYABORT; } s = NULL; } break; case 307: { if ((yyvsp[-1].nodes).node.ptr_leaflist->flags & LYS_CONFIG_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "config", "leaf-list"); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaflist->flags |= (yyvsp[0].i); } break; case 308: { if ((yyvsp[-1].nodes).node.flag & LYS_MIN_ELEMENTS) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "min-elements", "leaf-list"); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaflist->min = (yyvsp[0].uint); (yyvsp[-1].nodes).node.flag |= LYS_MIN_ELEMENTS; (yyval.nodes) = (yyvsp[-1].nodes); if ((yyvsp[-1].nodes).node.ptr_leaflist->max && ((yyvsp[-1].nodes).node.ptr_leaflist->min > (yyvsp[-1].nodes).node.ptr_leaflist->max)) { LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "Invalid value \"%d\" of \"%s\".", (yyvsp[0].uint), "min-elements"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "\"min-elements\" is bigger than \"max-elements\"."); YYABORT; } } break; case 309: { if ((yyvsp[-1].nodes).node.flag & LYS_MAX_ELEMENTS) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "max-elements", "leaf-list"); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaflist->max = (yyvsp[0].uint); (yyvsp[-1].nodes).node.flag |= LYS_MAX_ELEMENTS; (yyval.nodes) = (yyvsp[-1].nodes); if ((yyvsp[-1].nodes).node.ptr_leaflist->min > (yyvsp[-1].nodes).node.ptr_leaflist->max) { LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "Invalid value \"%d\" of \"%s\".", (yyvsp[0].uint), "max-elements"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "\"max-elements\" is smaller than \"min-elements\"."); YYABORT; } } break; case 310: { if ((yyvsp[-1].nodes).node.flag & LYS_ORDERED_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "ordered by", "leaf-list"); YYABORT; } if 
((yyvsp[0].i) & LYS_USERORDERED) { (yyvsp[-1].nodes).node.ptr_leaflist->flags |= LYS_USERORDERED; } (yyvsp[-1].nodes).node.flag |= (yyvsp[0].i); (yyval.nodes) = (yyvsp[-1].nodes); } break; case 311: { if ((yyvsp[-1].nodes).node.ptr_leaflist->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_leaflist, "status", "leaf-list"); YYABORT; } (yyvsp[-1].nodes).node.ptr_leaflist->flags |= (yyvsp[0].i); } break; case 312: { if (yang_read_description(trg, (yyvsp[-1].nodes).node.ptr_leaflist, s, "leaf-list", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 313: { if (yang_read_reference(trg, (yyvsp[-1].nodes).node.ptr_leaflist, s, "leaf-list", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 314: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_LIST, sizeof(struct lys_node_list)))) { YYABORT; } data_node = actual; s = NULL; actual_type = LIST_KEYWORD; } break; case 315: { void *tmp; if ((yyvsp[-1].nodes).node.ptr_list->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_list->iffeature, (yyvsp[-1].nodes).node.ptr_list->iffeature_size * sizeof *(yyvsp[-1].nodes).node.ptr_list->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_list->iffeature = tmp; } if ((yyvsp[-1].nodes).node.ptr_list->must_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_list->must, (yyvsp[-1].nodes).node.ptr_list->must_size * sizeof *(yyvsp[-1].nodes).node.ptr_list->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_list->must = tmp; } if ((yyvsp[-1].nodes).node.ptr_list->tpdf_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_list->tpdf, (yyvsp[-1].nodes).node.ptr_list->tpdf_size * sizeof *(yyvsp[-1].nodes).node.ptr_list->tpdf); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_list->tpdf = tmp; } if ((yyvsp[-1].nodes).node.ptr_list->unique_size) { tmp = 
realloc((yyvsp[-1].nodes).node.ptr_list->unique, (yyvsp[-1].nodes).node.ptr_list->unique_size * sizeof *(yyvsp[-1].nodes).node.ptr_list->unique); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_list->unique = tmp; } LOGDBG(LY_LDGYANG, "finished parsing list statement \"%s\"", data_node->name); actual_type = (yyvsp[-4].backup_token).token; actual = (yyvsp[-4].backup_token).actual; data_node = (yyvsp[-4].backup_token).actual; } break; case 316: { (yyval.nodes).node.ptr_list = actual; (yyval.nodes).node.flag = 0; } break; case 320: { if ((yyvsp[-1].nodes).node.ptr_list->keys) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_list, "key", "list"); free(s); YYABORT; } (yyvsp[-1].nodes).node.ptr_list->keys = (struct lys_node_leaf **)s; (yyval.nodes) = (yyvsp[-1].nodes); s = NULL; } break; case 321: { YANG_ADDELEM((yyvsp[-1].nodes).node.ptr_list->unique, (yyvsp[-1].nodes).node.ptr_list->unique_size, "uniques"); ((struct lys_unique *)actual)->expr = (const char **)s; (yyval.nodes) = (yyvsp[-1].nodes); s = NULL; actual = (yyvsp[-1].nodes).node.ptr_list; } break; case 322: { if ((yyvsp[-1].nodes).node.ptr_list->flags & LYS_CONFIG_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_list, "config", "list"); YYABORT; } (yyvsp[-1].nodes).node.ptr_list->flags |= (yyvsp[0].i); } break; case 323: { if ((yyvsp[-1].nodes).node.flag & LYS_MIN_ELEMENTS) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_list, "min-elements", "list"); YYABORT; } (yyvsp[-1].nodes).node.ptr_list->min = (yyvsp[0].uint); (yyvsp[-1].nodes).node.flag |= LYS_MIN_ELEMENTS; (yyval.nodes) = (yyvsp[-1].nodes); if ((yyvsp[-1].nodes).node.ptr_list->max && ((yyvsp[-1].nodes).node.ptr_list->min > (yyvsp[-1].nodes).node.ptr_list->max)) { LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_list, "Invalid value \"%d\" of \"%s\".", (yyvsp[0].uint), "min-elements"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, 
(yyvsp[-1].nodes).node.ptr_list, "\"min-elements\" is bigger than \"max-elements\"."); YYABORT; } } break; case 324: { if ((yyvsp[-1].nodes).node.flag & LYS_MAX_ELEMENTS) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_list, "max-elements", "list"); YYABORT; } (yyvsp[-1].nodes).node.ptr_list->max = (yyvsp[0].uint); (yyvsp[-1].nodes).node.flag |= LYS_MAX_ELEMENTS; (yyval.nodes) = (yyvsp[-1].nodes); if ((yyvsp[-1].nodes).node.ptr_list->min > (yyvsp[-1].nodes).node.ptr_list->max) { LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_list, "Invalid value \"%d\" of \"%s\".", (yyvsp[0].uint), "min-elements"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_list, "\"max-elements\" is smaller than \"min-elements\"."); YYABORT; } } break; case 325: { if ((yyvsp[-1].nodes).node.flag & LYS_ORDERED_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_list, "ordered by", "list"); YYABORT; } if ((yyvsp[0].i) & LYS_USERORDERED) { (yyvsp[-1].nodes).node.ptr_list->flags |= LYS_USERORDERED; } (yyvsp[-1].nodes).node.flag |= (yyvsp[0].i); (yyval.nodes) = (yyvsp[-1].nodes); } break; case 326: { if ((yyvsp[-1].nodes).node.ptr_list->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_list, "status", "list"); YYABORT; } (yyvsp[-1].nodes).node.ptr_list->flags |= (yyvsp[0].i); } break; case 327: { if (yang_read_description(trg, (yyvsp[-1].nodes).node.ptr_list, s, "list", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 328: { if (yang_read_reference(trg, (yyvsp[-1].nodes).node.ptr_list, s, "list", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 332: { if (trg->version < 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_LYS, (yyvsp[-2].nodes).node.ptr_list, "notification"); YYABORT; } } break; case 334: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_CHOICE, sizeof(struct 
lys_node_choice)))) { YYABORT; } data_node = actual; s = NULL; actual_type = CHOICE_KEYWORD; } break; case 335: { LOGDBG(LY_LDGYANG, "finished parsing choice statement \"%s\"", data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = (yyvsp[-1].backup_token).actual; } break; case 337: { struct lys_iffeature *tmp; if (((yyvsp[-1].nodes).node.ptr_choice->flags & LYS_MAND_TRUE) && (yyvsp[-1].nodes).node.ptr_choice->dflt) { LOGVAL(trg->ctx, LYE_INCHILDSTMT, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_choice, "default", "choice"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_choice, "The \"default\" statement is forbidden on choices with \"mandatory\"."); YYABORT; } if ((yyvsp[-1].nodes).node.ptr_choice->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_choice->iffeature, (yyvsp[-1].nodes).node.ptr_choice->iffeature_size * sizeof *tmp); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_choice->iffeature = tmp; } } break; case 338: { (yyval.nodes).node.ptr_choice = actual; (yyval.nodes).node.flag = 0; } break; case 341: { if ((yyvsp[-1].nodes).node.flag & LYS_CHOICE_DEFAULT) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_choice, "default", "choice"); free(s); YYABORT; } (yyvsp[-1].nodes).node.ptr_choice->dflt = (struct lys_node *) s; s = NULL; (yyval.nodes) = (yyvsp[-1].nodes); (yyval.nodes).node.flag |= LYS_CHOICE_DEFAULT; } break; case 342: { if ((yyvsp[-1].nodes).node.ptr_choice->flags & LYS_CONFIG_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_choice, "config", "choice"); YYABORT; } (yyvsp[-1].nodes).node.ptr_choice->flags |= (yyvsp[0].i); (yyval.nodes) = (yyvsp[-1].nodes); } break; case 343: { if ((yyvsp[-1].nodes).node.ptr_choice->flags & LYS_MAND_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_choice, "mandatory", "choice"); YYABORT; } (yyvsp[-1].nodes).node.ptr_choice->flags 
|= (yyvsp[0].i); (yyval.nodes) = (yyvsp[-1].nodes); } break; case 344: { if ((yyvsp[-1].nodes).node.ptr_choice->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_choice, "status", "choice"); YYABORT; } (yyvsp[-1].nodes).node.ptr_choice->flags |= (yyvsp[0].i); (yyval.nodes) = (yyvsp[-1].nodes); } break; case 345: { if (yang_read_description(trg, (yyvsp[-1].nodes).node.ptr_choice, s, "choice", NODE_PRINT)) { YYABORT; } s = NULL; (yyval.nodes) = (yyvsp[-1].nodes); } break; case 346: { if (yang_read_reference(trg, (yyvsp[-1].nodes).node.ptr_choice, s, "choice", NODE_PRINT)) { YYABORT; } s = NULL; (yyval.nodes) = (yyvsp[-1].nodes); } break; case 356: { if (trg->version < 2 ) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_LYS, actual, "choice"); YYABORT; } } break; case 357: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_CASE, sizeof(struct lys_node_case)))) { YYABORT; } data_node = actual; s = NULL; actual_type = CASE_KEYWORD; } break; case 358: { LOGDBG(LY_LDGYANG, "finished parsing case statement \"%s\"", data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = (yyvsp[-1].backup_token).actual; } break; case 360: { struct lys_iffeature *tmp; if ((yyvsp[-1].nodes).cs->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).cs->iffeature, (yyvsp[-1].nodes).cs->iffeature_size * sizeof *tmp); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).cs->iffeature = tmp; } } break; case 361: { (yyval.nodes).cs = actual; } break; case 364: { if ((yyvsp[-1].nodes).cs->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).cs, "status", "case"); YYABORT; } (yyvsp[-1].nodes).cs->flags |= (yyvsp[0].i); } break; case 365: { if (yang_read_description(trg, (yyvsp[-1].nodes).cs, s, "case", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 366: { if 
(yang_read_reference(trg, (yyvsp[-1].nodes).cs, s, "case", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 368: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_ANYXML, sizeof(struct lys_node_anydata)))) { YYABORT; } data_node = actual; s = NULL; actual_type = ANYXML_KEYWORD; } break; case 369: { LOGDBG(LY_LDGYANG, "finished parsing anyxml statement \"%s\"", data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = (yyvsp[-1].backup_token).actual; } break; case 370: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_ANYDATA, sizeof(struct lys_node_anydata)))) { YYABORT; } data_node = actual; s = NULL; actual_type = ANYDATA_KEYWORD; } break; case 371: { LOGDBG(LY_LDGYANG, "finished parsing anydata statement \"%s\"", data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = (yyvsp[-1].backup_token).actual; } break; case 373: { void *tmp; if ((yyvsp[-1].nodes).node.ptr_anydata->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_anydata->iffeature, (yyvsp[-1].nodes).node.ptr_anydata->iffeature_size * sizeof *(yyvsp[-1].nodes).node.ptr_anydata->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_anydata->iffeature = tmp; } if ((yyvsp[-1].nodes).node.ptr_anydata->must_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_anydata->must, (yyvsp[-1].nodes).node.ptr_anydata->must_size * sizeof *(yyvsp[-1].nodes).node.ptr_anydata->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_anydata->must = tmp; } } break; case 374: { (yyval.nodes).node.ptr_anydata = actual; (yyval.nodes).node.flag = actual_type; } break; case 378: { if ((yyvsp[-1].nodes).node.ptr_anydata->flags & LYS_CONFIG_MASK) { LOGVAL(trg->ctx, 
LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_anydata, "config", ((yyvsp[-1].nodes).node.flag == ANYXML_KEYWORD) ? "anyxml" : "anydata"); YYABORT; } (yyvsp[-1].nodes).node.ptr_anydata->flags |= (yyvsp[0].i); } break; case 379: { if ((yyvsp[-1].nodes).node.ptr_anydata->flags & LYS_MAND_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_anydata, "mandatory", ((yyvsp[-1].nodes).node.flag == ANYXML_KEYWORD) ? "anyxml" : "anydata"); YYABORT; } (yyvsp[-1].nodes).node.ptr_anydata->flags |= (yyvsp[0].i); } break; case 380: { if ((yyvsp[-1].nodes).node.ptr_anydata->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_anydata, "status", ((yyvsp[-1].nodes).node.flag == ANYXML_KEYWORD) ? "anyxml" : "anydata"); YYABORT; } (yyvsp[-1].nodes).node.ptr_anydata->flags |= (yyvsp[0].i); } break; case 381: { if (yang_read_description(trg, (yyvsp[-1].nodes).node.ptr_anydata, s, ((yyvsp[-1].nodes).node.flag == ANYXML_KEYWORD) ? "anyxml" : "anydata", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 382: { if (yang_read_reference(trg, (yyvsp[-1].nodes).node.ptr_anydata, s, ((yyvsp[-1].nodes).node.flag == ANYXML_KEYWORD) ? 
"anyxml" : "anydata", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 383: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_USES, sizeof(struct lys_node_uses)))) { YYABORT; } data_node = actual; s = NULL; actual_type = USES_KEYWORD; } break; case 384: { LOGDBG(LY_LDGYANG, "finished parsing uses statement \"%s\"", data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = (yyvsp[-1].backup_token).actual; } break; case 386: { void *tmp; if ((yyvsp[-1].nodes).uses->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).uses->iffeature, (yyvsp[-1].nodes).uses->iffeature_size * sizeof *(yyvsp[-1].nodes).uses->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).uses->iffeature = tmp; } if ((yyvsp[-1].nodes).uses->refine_size) { tmp = realloc((yyvsp[-1].nodes).uses->refine, (yyvsp[-1].nodes).uses->refine_size * sizeof *(yyvsp[-1].nodes).uses->refine); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).uses->refine = tmp; } if ((yyvsp[-1].nodes).uses->augment_size) { tmp = realloc((yyvsp[-1].nodes).uses->augment, (yyvsp[-1].nodes).uses->augment_size * sizeof *(yyvsp[-1].nodes).uses->augment); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).uses->augment = tmp; } } break; case 387: { (yyval.nodes).uses = actual; } break; case 390: { if ((yyvsp[-1].nodes).uses->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).uses, "status", "uses"); YYABORT; } (yyvsp[-1].nodes).uses->flags |= (yyvsp[0].i); } break; case 391: { if (yang_read_description(trg, (yyvsp[-1].nodes).uses, s, "uses", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 392: { if (yang_read_reference(trg, (yyvsp[-1].nodes).uses, s, "uses", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 397: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; YANG_ADDELEM(((struct 
lys_node_uses *)actual)->refine, ((struct lys_node_uses *)actual)->refine_size, "refines"); ((struct lys_refine *)actual)->target_name = transform_schema2json(trg, s); free(s); s = NULL; if (!((struct lys_refine *)actual)->target_name) { YYABORT; } actual_type = REFINE_KEYWORD; } break; case 398: { actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; } break; case 400: { void *tmp; if ((yyvsp[-1].nodes).refine->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).refine->iffeature, (yyvsp[-1].nodes).refine->iffeature_size * sizeof *(yyvsp[-1].nodes).refine->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).refine->iffeature = tmp; } if ((yyvsp[-1].nodes).refine->must_size) { tmp = realloc((yyvsp[-1].nodes).refine->must, (yyvsp[-1].nodes).refine->must_size * sizeof *(yyvsp[-1].nodes).refine->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).refine->must = tmp; } if ((yyvsp[-1].nodes).refine->dflt_size) { tmp = realloc((yyvsp[-1].nodes).refine->dflt, (yyvsp[-1].nodes).refine->dflt_size * sizeof *(yyvsp[-1].nodes).refine->dflt); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).refine->dflt = tmp; } } break; case 401: { (yyval.nodes).refine = actual; actual_type = REFINE_KEYWORD; } break; case 402: { actual = (yyvsp[-2].nodes).refine; actual_type = REFINE_KEYWORD; if ((yyvsp[-2].nodes).refine->target_type) { if ((yyvsp[-2].nodes).refine->target_type & (LYS_LEAF | LYS_LIST | LYS_LEAFLIST | LYS_CONTAINER | LYS_ANYXML)) { (yyvsp[-2].nodes).refine->target_type &= (LYS_LEAF | LYS_LIST | LYS_LEAFLIST | LYS_CONTAINER | LYS_ANYXML); } else { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "must", "refine"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid refine target nodetype for the substatements."); YYABORT; } } else { (yyvsp[-2].nodes).refine->target_type = LYS_LEAF | LYS_LIST | LYS_LEAFLIST | LYS_CONTAINER | LYS_ANYXML; } } break; case 403: { /* leaf, leaf-list, list, container 
or anyxml */ /* check possibility of statements combination */ if ((yyvsp[-2].nodes).refine->target_type) { if ((yyvsp[-2].nodes).refine->target_type & (LYS_LEAF | LYS_LIST | LYS_LEAFLIST | LYS_CONTAINER | LYS_ANYDATA)) { (yyvsp[-2].nodes).refine->target_type &= (LYS_LEAF | LYS_LIST | LYS_LEAFLIST | LYS_CONTAINER | LYS_ANYDATA); } else { free(s); LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "if-feature", "refine"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid refine target nodetype for the substatements."); YYABORT; } } else { (yyvsp[-2].nodes).refine->target_type = LYS_LEAF | LYS_LIST | LYS_LEAFLIST | LYS_CONTAINER | LYS_ANYDATA; } } break; case 404: { if ((yyvsp[-1].nodes).refine->target_type) { if ((yyvsp[-1].nodes).refine->target_type & LYS_CONTAINER) { if ((yyvsp[-1].nodes).refine->mod.presence) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "presence", "refine"); free(s); YYABORT; } (yyvsp[-1].nodes).refine->target_type = LYS_CONTAINER; (yyvsp[-1].nodes).refine->mod.presence = lydict_insert_zc(trg->ctx, s); } else { free(s); LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "presence", "refine"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid refine target nodetype for the substatements."); YYABORT; } } else { (yyvsp[-1].nodes).refine->target_type = LYS_CONTAINER; (yyvsp[-1].nodes).refine->mod.presence = lydict_insert_zc(trg->ctx, s); } s = NULL; (yyval.nodes) = (yyvsp[-1].nodes); } break; case 405: { int i; if ((yyvsp[-1].nodes).refine->dflt_size) { if (trg->version < 2) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "default", "refine"); YYABORT; } if ((yyvsp[-1].nodes).refine->target_type & LYS_LEAFLIST) { (yyvsp[-1].nodes).refine->target_type = LYS_LEAFLIST; } else { free(s); LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "default", "refine"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid refine target nodetype for the substatements."); YYABORT; } } else { if 
((yyvsp[-1].nodes).refine->target_type) { if (trg->version < 2 && ((yyvsp[-1].nodes).refine->target_type & (LYS_LEAF | LYS_CHOICE))) { (yyvsp[-1].nodes).refine->target_type &= (LYS_LEAF | LYS_CHOICE); } if (trg->version > 1 && ((yyvsp[-1].nodes).refine->target_type & (LYS_LEAF | LYS_LEAFLIST | LYS_CHOICE))) { /* YANG 1.1 */ (yyvsp[-1].nodes).refine->target_type &= (LYS_LEAF | LYS_LEAFLIST | LYS_CHOICE); } else { free(s); LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "default", "refine"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid refine target nodetype for the substatements."); YYABORT; } } else { if (trg->version < 2) { (yyvsp[-1].nodes).refine->target_type = LYS_LEAF | LYS_CHOICE; } else { /* YANG 1.1 */ (yyvsp[-1].nodes).refine->target_type = LYS_LEAF | LYS_LEAFLIST | LYS_CHOICE; } } } /* check for duplicity */ for (i = 0; i < (yyvsp[-1].nodes).refine->dflt_size; ++i) { if (ly_strequal((yyvsp[-1].nodes).refine->dflt[i], s, 0)) { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "default"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Duplicated default value \"%s\".", s); YYABORT; } } YANG_ADDELEM((yyvsp[-1].nodes).refine->dflt, (yyvsp[-1].nodes).refine->dflt_size, "defaults"); *((const char **)actual) = lydict_insert_zc(trg->ctx, s); actual = (yyvsp[-1].nodes).refine; s = NULL; (yyval.nodes) = (yyvsp[-1].nodes); } break; case 406: { if ((yyvsp[-1].nodes).refine->target_type) { if ((yyvsp[-1].nodes).refine->target_type & (LYS_LEAF | LYS_CHOICE | LYS_LIST | LYS_CONTAINER | LYS_LEAFLIST)) { (yyvsp[-1].nodes).refine->target_type &= (LYS_LEAF | LYS_CHOICE | LYS_LIST | LYS_CONTAINER | LYS_LEAFLIST); if ((yyvsp[-1].nodes).refine->flags & LYS_CONFIG_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "config", "refine"); YYABORT; } (yyvsp[-1].nodes).refine->flags |= (yyvsp[0].i); } else { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "config", "refine"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid refine target 
nodetype for the substatements."); YYABORT; } } else { (yyvsp[-1].nodes).refine->target_type = LYS_LEAF | LYS_CHOICE | LYS_LIST | LYS_CONTAINER | LYS_LEAFLIST; (yyvsp[-1].nodes).refine->flags |= (yyvsp[0].i); } (yyval.nodes) = (yyvsp[-1].nodes); } break; case 407: { if ((yyvsp[-1].nodes).refine->target_type) { if ((yyvsp[-1].nodes).refine->target_type & (LYS_LEAF | LYS_CHOICE | LYS_ANYXML)) { (yyvsp[-1].nodes).refine->target_type &= (LYS_LEAF | LYS_CHOICE | LYS_ANYXML); if ((yyvsp[-1].nodes).refine->flags & LYS_MAND_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "mandatory", "refine"); YYABORT; } (yyvsp[-1].nodes).refine->flags |= (yyvsp[0].i); } else { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "mandatory", "refine"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid refine target nodetype for the substatements."); YYABORT; } } else { (yyvsp[-1].nodes).refine->target_type = LYS_LEAF | LYS_CHOICE | LYS_ANYXML; (yyvsp[-1].nodes).refine->flags |= (yyvsp[0].i); } (yyval.nodes) = (yyvsp[-1].nodes); } break; case 408: { if ((yyvsp[-1].nodes).refine->target_type) { if ((yyvsp[-1].nodes).refine->target_type & (LYS_LIST | LYS_LEAFLIST)) { (yyvsp[-1].nodes).refine->target_type &= (LYS_LIST | LYS_LEAFLIST); if ((yyvsp[-1].nodes).refine->flags & LYS_RFN_MINSET) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "min-elements", "refine"); YYABORT; } (yyvsp[-1].nodes).refine->flags |= LYS_RFN_MINSET; (yyvsp[-1].nodes).refine->mod.list.min = (yyvsp[0].uint); } else { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "min-elements", "refine"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid refine target nodetype for the substatements."); YYABORT; } } else { (yyvsp[-1].nodes).refine->target_type = LYS_LIST | LYS_LEAFLIST; (yyvsp[-1].nodes).refine->flags |= LYS_RFN_MINSET; (yyvsp[-1].nodes).refine->mod.list.min = (yyvsp[0].uint); } (yyval.nodes) = (yyvsp[-1].nodes); } break; case 409: { if ((yyvsp[-1].nodes).refine->target_type) { if 
((yyvsp[-1].nodes).refine->target_type & (LYS_LIST | LYS_LEAFLIST)) { (yyvsp[-1].nodes).refine->target_type &= (LYS_LIST | LYS_LEAFLIST); if ((yyvsp[-1].nodes).refine->flags & LYS_RFN_MAXSET) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "max-elements", "refine"); YYABORT; } (yyvsp[-1].nodes).refine->flags |= LYS_RFN_MAXSET; (yyvsp[-1].nodes).refine->mod.list.max = (yyvsp[0].uint); } else { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "max-elements", "refine"); LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid refine target nodetype for the substatements."); YYABORT; } } else { (yyvsp[-1].nodes).refine->target_type = LYS_LIST | LYS_LEAFLIST; (yyvsp[-1].nodes).refine->flags |= LYS_RFN_MAXSET; (yyvsp[-1].nodes).refine->mod.list.max = (yyvsp[0].uint); } (yyval.nodes) = (yyvsp[-1].nodes); } break; case 410: { if (yang_read_description(trg, (yyvsp[-1].nodes).refine, s, "refine", NODE)) { YYABORT; } s = NULL; } break; case 411: { if (yang_read_reference(trg, (yyvsp[-1].nodes).refine, s, "refine", NODE)) { YYABORT; } s = NULL; } break; case 414: { void *parent; (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; parent = actual; YANG_ADDELEM(((struct lys_node_uses *)actual)->augment, ((struct lys_node_uses *)actual)->augment_size, "augments"); if (yang_read_augment(trg, parent, actual, s)) { YYABORT; } data_node = actual; s = NULL; actual_type = AUGMENT_KEYWORD; } break; case 415: { LOGDBG(LY_LDGYANG, "finished parsing augment statement \"%s\"", data_node->name); actual_type = (yyvsp[-4].backup_token).token; actual = (yyvsp[-4].backup_token).actual; data_node = (yyvsp[-4].backup_token).actual; } break; case 418: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; YANG_ADDELEM(trg->augment, trg->augment_size, "augments"); if (yang_read_augment(trg, NULL, actual, s)) { YYABORT; } data_node = actual; s = NULL; actual_type = AUGMENT_KEYWORD; } break; case 419: { LOGDBG(LY_LDGYANG, "finished 
parsing augment statement \"%s\"", data_node->name); actual_type = (yyvsp[-4].backup_token).token; actual = (yyvsp[-4].backup_token).actual; data_node = (yyvsp[-4].backup_token).actual; } break; case 420: { (yyval.nodes).augment = actual; } break; case 423: { if ((yyvsp[-1].nodes).augment->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).augment, "status", "augment"); YYABORT; } (yyvsp[-1].nodes).augment->flags |= (yyvsp[0].i); } break; case 424: { if (yang_read_description(trg, (yyvsp[-1].nodes).augment, s, "augment", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 425: { if (yang_read_reference(trg, (yyvsp[-1].nodes).augment, s, "augment", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 428: { if (trg->version < 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_LYS, (yyvsp[-2].nodes).augment, "notification"); YYABORT; } } break; case 430: { if (param->module->version != 2) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_LYS, actual, "action"); free(s); YYABORT; } (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_ACTION, sizeof(struct lys_node_rpc_action)))) { YYABORT; } data_node = actual; s = NULL; actual_type = ACTION_KEYWORD; } break; case 431: { LOGDBG(LY_LDGYANG, "finished parsing action statement \"%s\"", data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = (yyvsp[-1].backup_token).actual; } break; case 432: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, NULL, param->node, s, LYS_RPC, sizeof(struct lys_node_rpc_action)))) { YYABORT; } data_node = actual; s = NULL; actual_type = RPC_KEYWORD; } break; case 433: { LOGDBG(LY_LDGYANG, "finished parsing rpc statement \"%s\"", data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = 
(yyvsp[-1].backup_token).actual; } break; case 435: { void *tmp; if ((yyvsp[-1].nodes).node.ptr_rpc->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_rpc->iffeature, (yyvsp[-1].nodes).node.ptr_rpc->iffeature_size * sizeof *(yyvsp[-1].nodes).node.ptr_rpc->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_rpc->iffeature = tmp; } if ((yyvsp[-1].nodes).node.ptr_rpc->tpdf_size) { tmp = realloc((yyvsp[-1].nodes).node.ptr_rpc->tpdf, (yyvsp[-1].nodes).node.ptr_rpc->tpdf_size * sizeof *(yyvsp[-1].nodes).node.ptr_rpc->tpdf); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).node.ptr_rpc->tpdf = tmp; } } break; case 436: { (yyval.nodes).node.ptr_rpc = actual; (yyval.nodes).node.flag = 0; } break; case 438: { if ((yyvsp[-1].nodes).node.ptr_rpc->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).node.ptr_rpc, "status", "rpc"); YYABORT; } (yyvsp[-1].nodes).node.ptr_rpc->flags |= (yyvsp[0].i); } break; case 439: { if (yang_read_description(trg, (yyvsp[-1].nodes).node.ptr_rpc, s, "rpc", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 440: { if (yang_read_reference(trg, (yyvsp[-1].nodes).node.ptr_rpc, s, "rpc", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 443: { if ((yyvsp[-2].nodes).node.flag & LYS_RPC_INPUT) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-2].nodes).node.ptr_rpc, "input", "rpc"); YYABORT; } (yyvsp[-2].nodes).node.flag |= LYS_RPC_INPUT; (yyval.nodes) = (yyvsp[-2].nodes); } break; case 444: { if ((yyvsp[-2].nodes).node.flag & LYS_RPC_OUTPUT) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-2].nodes).node.ptr_rpc, "output", "rpc"); YYABORT; } (yyvsp[-2].nodes).node.flag |= LYS_RPC_OUTPUT; (yyval.nodes) = (yyvsp[-2].nodes); } break; case 445: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; s = strdup("input"); if (!s) { LOGMEM(trg->ctx); YYABORT; } if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_INPUT, sizeof(struct 
lys_node_inout)))) { YYABORT; } data_node = actual; s = NULL; actual_type = INPUT_KEYWORD; } break; case 446: { void *tmp; struct lys_node_inout *input = actual; if (input->must_size) { tmp = realloc(input->must, input->must_size * sizeof *input->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } input->must = tmp; } if (input->tpdf_size) { tmp = realloc(input->tpdf, input->tpdf_size * sizeof *input->tpdf); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } input->tpdf = tmp; } LOGDBG(LY_LDGYANG, "finished parsing input statement \"%s\"", data_node->name); actual_type = (yyvsp[-4].backup_token).token; actual = (yyvsp[-4].backup_token).actual; data_node = (yyvsp[-4].backup_token).actual; } break; case 452: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; s = strdup("output"); if (!s) { LOGMEM(trg->ctx); YYABORT; } if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_OUTPUT, sizeof(struct lys_node_inout)))) { YYABORT; } data_node = actual; s = NULL; actual_type = OUTPUT_KEYWORD; } break; case 453: { void *tmp; struct lys_node_inout *output = actual; if (output->must_size) { tmp = realloc(output->must, output->must_size * sizeof *output->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } output->must = tmp; } if (output->tpdf_size) { tmp = realloc(output->tpdf, output->tpdf_size * sizeof *output->tpdf); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } output->tpdf = tmp; } LOGDBG(LY_LDGYANG, "finished parsing output statement \"%s\"", data_node->name); actual_type = (yyvsp[-4].backup_token).token; actual = (yyvsp[-4].backup_token).actual; data_node = (yyvsp[-4].backup_token).actual; } break; case 454: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_node(trg, actual, param->node, s, LYS_NOTIF, sizeof(struct lys_node_notif)))) { YYABORT; } data_node = actual; actual_type = NOTIFICATION_KEYWORD; } break; case 455: { LOGDBG(LY_LDGYANG, "finished parsing notification statement \"%s\"", 
data_node->name); actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; data_node = (yyvsp[-1].backup_token).actual; } break; case 457: { void *tmp; if ((yyvsp[-1].nodes).notif->must_size) { tmp = realloc((yyvsp[-1].nodes).notif->must, (yyvsp[-1].nodes).notif->must_size * sizeof *(yyvsp[-1].nodes).notif->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).notif->must = tmp; } if ((yyvsp[-1].nodes).notif->iffeature_size) { tmp = realloc((yyvsp[-1].nodes).notif->iffeature, (yyvsp[-1].nodes).notif->iffeature_size * sizeof *(yyvsp[-1].nodes).notif->iffeature); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).notif->iffeature = tmp; } if ((yyvsp[-1].nodes).notif->tpdf_size) { tmp = realloc((yyvsp[-1].nodes).notif->tpdf, (yyvsp[-1].nodes).notif->tpdf_size * sizeof *(yyvsp[-1].nodes).notif->tpdf); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].nodes).notif->tpdf = tmp; } } break; case 458: { (yyval.nodes).notif = actual; } break; case 461: { if ((yyvsp[-1].nodes).notif->flags & LYS_STATUS_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_LYS, (yyvsp[-1].nodes).notif, "status", "notification"); YYABORT; } (yyvsp[-1].nodes).notif->flags |= (yyvsp[0].i); } break; case 462: { if (yang_read_description(trg, (yyvsp[-1].nodes).notif, s, "notification", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 463: { if (yang_read_reference(trg, (yyvsp[-1].nodes).notif, s, "notification", NODE_PRINT)) { YYABORT; } s = NULL; } break; case 467: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; YANG_ADDELEM(trg->deviation, trg->deviation_size, "deviations"); ((struct lys_deviation *)actual)->target_name = transform_schema2json(trg, s); free(s); if (!((struct lys_deviation *)actual)->target_name) { YYABORT; } s = NULL; actual_type = DEVIATION_KEYWORD; } break; case 468: { void *tmp; if ((yyvsp[-1].dev)->deviate_size) { tmp = realloc((yyvsp[-1].dev)->deviate, (yyvsp[-1].dev)->deviate_size * sizeof 
*(yyvsp[-1].dev)->deviate); if (!tmp) { LOGINT(trg->ctx); YYABORT; } (yyvsp[-1].dev)->deviate = tmp; } else { LOGVAL(trg->ctx, LYE_MISSCHILDSTMT, LY_VLOG_NONE, NULL, "deviate", "deviation"); YYABORT; } actual_type = (yyvsp[-4].backup_token).token; actual = (yyvsp[-4].backup_token).actual; } break; case 469: { (yyval.dev) = actual; } break; case 470: { if (yang_read_description(trg, (yyvsp[-1].dev), s, "deviation", NODE)) { YYABORT; } s = NULL; (yyval.dev) = (yyvsp[-1].dev); } break; case 471: { if (yang_read_reference(trg, (yyvsp[-1].dev), s, "deviation", NODE)) { YYABORT; } s = NULL; (yyval.dev) = (yyvsp[-1].dev); } break; case 477: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_deviate_unsupported(trg->ctx, actual))) { YYABORT; } actual_type = NOT_SUPPORTED_KEYWORD; } break; case 478: { actual_type = (yyvsp[-2].backup_token).token; actual = (yyvsp[-2].backup_token).actual; } break; case 484: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_deviate(trg->ctx, actual, LY_DEVIATE_ADD))) { YYABORT; } actual_type = ADD_KEYWORD; } break; case 485: { actual_type = (yyvsp[-2].backup_token).token; actual = (yyvsp[-2].backup_token).actual; } break; case 487: { void *tmp; if ((yyvsp[-1].deviate)->must_size) { tmp = realloc((yyvsp[-1].deviate)->must, (yyvsp[-1].deviate)->must_size * sizeof *(yyvsp[-1].deviate)->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].deviate)->must = tmp; } if ((yyvsp[-1].deviate)->unique_size) { tmp = realloc((yyvsp[-1].deviate)->unique, (yyvsp[-1].deviate)->unique_size * sizeof *(yyvsp[-1].deviate)->unique); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].deviate)->unique = tmp; } if ((yyvsp[-1].deviate)->dflt_size) { tmp = realloc((yyvsp[-1].deviate)->dflt, (yyvsp[-1].deviate)->dflt_size * sizeof *(yyvsp[-1].deviate)->dflt); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].deviate)->dflt = tmp; } } break; case 488: 
{ (yyval.deviate) = actual; } break; case 489: { if (yang_read_units(trg, actual, s, ADD_KEYWORD)) { YYABORT; } s = NULL; (yyval.deviate) = (yyvsp[-1].deviate); } break; case 491: { YANG_ADDELEM((yyvsp[-1].deviate)->unique, (yyvsp[-1].deviate)->unique_size, "uniques"); ((struct lys_unique *)actual)->expr = (const char **)s; s = NULL; actual = (yyvsp[-1].deviate); (yyval.deviate)= (yyvsp[-1].deviate); } break; case 492: { YANG_ADDELEM((yyvsp[-1].deviate)->dflt, (yyvsp[-1].deviate)->dflt_size, "defaults"); *((const char **)actual) = lydict_insert_zc(trg->ctx, s); s = NULL; actual = (yyvsp[-1].deviate); (yyval.deviate) = (yyvsp[-1].deviate); } break; case 493: { if ((yyvsp[-1].deviate)->flags & LYS_CONFIG_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "config", "deviate"); YYABORT; } (yyvsp[-1].deviate)->flags = (yyvsp[0].i); (yyval.deviate) = (yyvsp[-1].deviate); } break; case 494: { if ((yyvsp[-1].deviate)->flags & LYS_MAND_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "mandatory", "deviate"); YYABORT; } (yyvsp[-1].deviate)->flags = (yyvsp[0].i); (yyval.deviate) = (yyvsp[-1].deviate); } break; case 495: { if ((yyvsp[-1].deviate)->min_set) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "min-elements", "deviation"); YYABORT; } (yyvsp[-1].deviate)->min = (yyvsp[0].uint); (yyvsp[-1].deviate)->min_set = 1; (yyval.deviate) = (yyvsp[-1].deviate); } break; case 496: { if ((yyvsp[-1].deviate)->max_set) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "max-elements", "deviation"); YYABORT; } (yyvsp[-1].deviate)->max = (yyvsp[0].uint); (yyvsp[-1].deviate)->max_set = 1; (yyval.deviate) = (yyvsp[-1].deviate); } break; case 497: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_deviate(trg->ctx, actual, LY_DEVIATE_DEL))) { YYABORT; } actual_type = DELETE_KEYWORD; } break; case 498: { actual_type = (yyvsp[-2].backup_token).token; actual = (yyvsp[-2].backup_token).actual; } break; case 500: { 
void *tmp; if ((yyvsp[-1].deviate)->must_size) { tmp = realloc((yyvsp[-1].deviate)->must, (yyvsp[-1].deviate)->must_size * sizeof *(yyvsp[-1].deviate)->must); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].deviate)->must = tmp; } if ((yyvsp[-1].deviate)->unique_size) { tmp = realloc((yyvsp[-1].deviate)->unique, (yyvsp[-1].deviate)->unique_size * sizeof *(yyvsp[-1].deviate)->unique); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].deviate)->unique = tmp; } if ((yyvsp[-1].deviate)->dflt_size) { tmp = realloc((yyvsp[-1].deviate)->dflt, (yyvsp[-1].deviate)->dflt_size * sizeof *(yyvsp[-1].deviate)->dflt); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].deviate)->dflt = tmp; } } break; case 501: { (yyval.deviate) = actual; } break; case 502: { if (yang_read_units(trg, actual, s, DELETE_KEYWORD)) { YYABORT; } s = NULL; (yyval.deviate) = (yyvsp[-1].deviate); } break; case 504: { YANG_ADDELEM((yyvsp[-1].deviate)->unique, (yyvsp[-1].deviate)->unique_size, "uniques"); ((struct lys_unique *)actual)->expr = (const char **)s; s = NULL; actual = (yyvsp[-1].deviate); (yyval.deviate) = (yyvsp[-1].deviate); } break; case 505: { YANG_ADDELEM((yyvsp[-1].deviate)->dflt, (yyvsp[-1].deviate)->dflt_size, "defaults"); *((const char **)actual) = lydict_insert_zc(trg->ctx, s); s = NULL; actual = (yyvsp[-1].deviate); (yyval.deviate) = (yyvsp[-1].deviate); } break; case 506: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_deviate(trg->ctx, actual, LY_DEVIATE_RPL))) { YYABORT; } actual_type = REPLACE_KEYWORD; } break; case 507: { actual_type = (yyvsp[-2].backup_token).token; actual = (yyvsp[-2].backup_token).actual; } break; case 509: { void *tmp; if ((yyvsp[-1].deviate)->dflt_size) { tmp = realloc((yyvsp[-1].deviate)->dflt, (yyvsp[-1].deviate)->dflt_size * sizeof *(yyvsp[-1].deviate)->dflt); if (!tmp) { LOGMEM(trg->ctx); YYABORT; } (yyvsp[-1].deviate)->dflt = tmp; } } break; case 510: { (yyval.deviate) = actual; } break; 
case 512: { if (yang_read_units(trg, actual, s, DELETE_KEYWORD)) { YYABORT; } s = NULL; (yyval.deviate) = (yyvsp[-1].deviate); } break; case 513: { YANG_ADDELEM((yyvsp[-1].deviate)->dflt, (yyvsp[-1].deviate)->dflt_size, "defaults"); *((const char **)actual) = lydict_insert_zc(trg->ctx, s); s = NULL; actual = (yyvsp[-1].deviate); (yyval.deviate) = (yyvsp[-1].deviate); } break; case 514: { if ((yyvsp[-1].deviate)->flags & LYS_CONFIG_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "config", "deviate"); YYABORT; } (yyvsp[-1].deviate)->flags = (yyvsp[0].i); (yyval.deviate) = (yyvsp[-1].deviate); } break; case 515: { if ((yyvsp[-1].deviate)->flags & LYS_MAND_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "mandatory", "deviate"); YYABORT; } (yyvsp[-1].deviate)->flags = (yyvsp[0].i); (yyval.deviate) = (yyvsp[-1].deviate); } break; case 516: { if ((yyvsp[-1].deviate)->min_set) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "min-elements", "deviation"); YYABORT; } (yyvsp[-1].deviate)->min = (yyvsp[0].uint); (yyvsp[-1].deviate)->min_set = 1; (yyval.deviate) = (yyvsp[-1].deviate); } break; case 517: { if ((yyvsp[-1].deviate)->max_set) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "max-elements", "deviation"); YYABORT; } (yyvsp[-1].deviate)->max = (yyvsp[0].uint); (yyvsp[-1].deviate)->max_set = 1; (yyval.deviate) = (yyvsp[-1].deviate); } break; case 518: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_when(trg, actual, actual_type, s))) { YYABORT; } s = NULL; actual_type = WHEN_KEYWORD; } break; case 519: { actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; } break; case 523: { if (yang_read_description(trg, actual, s, "when", NODE)) { YYABORT; } s = NULL; } break; case 524: { if (yang_read_reference(trg, actual, s, "when", NODE)) { YYABORT; } s = NULL; } break; case 525: { (yyval.i) = (yyvsp[0].i); backup_type = actual_type; actual_type = 
CONFIG_KEYWORD; } break; case 526: { (yyval.i) = (yyvsp[-1].i); } break; case 527: { (yyval.i) = LYS_CONFIG_W | LYS_CONFIG_SET; } break; case 528: { (yyval.i) = LYS_CONFIG_R | LYS_CONFIG_SET; } break; case 529: { if (!strcmp(s, "true")) { (yyval.i) = LYS_CONFIG_W | LYS_CONFIG_SET; } else if (!strcmp(s, "false")) { (yyval.i) = LYS_CONFIG_R | LYS_CONFIG_SET; } else { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "config"); free(s); YYABORT; } free(s); s = NULL; } break; case 530: { (yyval.i) = (yyvsp[0].i); backup_type = actual_type; actual_type = MANDATORY_KEYWORD; } break; case 531: { (yyval.i) = (yyvsp[-1].i); } break; case 532: { (yyval.i) = LYS_MAND_TRUE; } break; case 533: { (yyval.i) = LYS_MAND_FALSE; } break; case 534: { if (!strcmp(s, "true")) { (yyval.i) = LYS_MAND_TRUE; } else if (!strcmp(s, "false")) { (yyval.i) = LYS_MAND_FALSE; } else { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "mandatory"); free(s); YYABORT; } free(s); s = NULL; } break; case 535: { backup_type = actual_type; actual_type = PRESENCE_KEYWORD; } break; case 537: { (yyval.uint) = (yyvsp[0].uint); backup_type = actual_type; actual_type = MIN_ELEMENTS_KEYWORD; } break; case 538: { (yyval.uint) = (yyvsp[-1].uint); } break; case 539: { (yyval.uint) = (yyvsp[-1].uint); } break; case 540: { if (strlen(s) == 1 && s[0] == '0') { (yyval.uint) = 0; } else { /* convert it to uint32_t */ uint64_t val; char *endptr = NULL; errno = 0; val = strtoul(s, &endptr, 10); if (*endptr || s[0] == '-' || errno || val > UINT32_MAX) { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "min-elements"); free(s); YYABORT; } (yyval.uint) = (uint32_t) val; } free(s); s = NULL; } break; case 541: { (yyval.uint) = (yyvsp[0].uint); backup_type = actual_type; actual_type = MAX_ELEMENTS_KEYWORD; } break; case 542: { (yyval.uint) = (yyvsp[-1].uint); } break; case 543: { (yyval.uint) = 0; } break; case 544: { (yyval.uint) = (yyvsp[-1].uint); } break; case 545: { if (!strcmp(s, "unbounded")) { (yyval.uint) = 0; } 
else { /* convert it to uint32_t */ uint64_t val; char *endptr = NULL; errno = 0; val = strtoul(s, &endptr, 10); if (*endptr || s[0] == '-' || errno || val == 0 || val > UINT32_MAX) { LOGVAL(trg->ctx, LYE_INARG, LY_VLOG_NONE, NULL, s, "max-elements"); free(s); YYABORT; } (yyval.uint) = (uint32_t) val; } free(s); s = NULL; } break; case 546: { (yyval.i) = (yyvsp[0].i); backup_type = actual_type; actual_type = ORDERED_BY_KEYWORD; } break; case 547: { (yyval.i) = (yyvsp[-1].i); } break; case 548: { (yyval.i) = LYS_USERORDERED; } break; case 549: { (yyval.i) = LYS_SYSTEMORDERED; } break; case 550: { if (!strcmp(s, "user")) { (yyval.i) = LYS_USERORDERED; } else if (!strcmp(s, "system")) { (yyval.i) = LYS_SYSTEMORDERED; } else { free(s); YYABORT; } free(s); s=NULL; } break; case 551: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; switch (actual_type) { case CONTAINER_KEYWORD: YANG_ADDELEM(((struct lys_node_container *)actual)->must, ((struct lys_node_container *)actual)->must_size, "musts"); break; case ANYDATA_KEYWORD: case ANYXML_KEYWORD: YANG_ADDELEM(((struct lys_node_anydata *)actual)->must, ((struct lys_node_anydata *)actual)->must_size, "musts"); break; case LEAF_KEYWORD: YANG_ADDELEM(((struct lys_node_leaf *)actual)->must, ((struct lys_node_leaf *)actual)->must_size, "musts"); break; case LEAF_LIST_KEYWORD: YANG_ADDELEM(((struct lys_node_leaflist *)actual)->must, ((struct lys_node_leaflist *)actual)->must_size, "musts"); break; case LIST_KEYWORD: YANG_ADDELEM(((struct lys_node_list *)actual)->must, ((struct lys_node_list *)actual)->must_size, "musts"); break; case REFINE_KEYWORD: YANG_ADDELEM(((struct lys_refine *)actual)->must, ((struct lys_refine *)actual)->must_size, "musts"); break; case ADD_KEYWORD: case DELETE_KEYWORD: YANG_ADDELEM(((struct lys_deviate *)actual)->must, ((struct lys_deviate *)actual)->must_size, "musts"); break; case NOTIFICATION_KEYWORD: if (trg->version < 2) { free(s); LOGVAL(trg->ctx, LYE_INSTMT, 
LY_VLOG_LYS, actual, "must"); YYABORT; } YANG_ADDELEM(((struct lys_node_notif *)actual)->must, ((struct lys_node_notif *)actual)->must_size, "musts"); break; case INPUT_KEYWORD: case OUTPUT_KEYWORD: if (trg->version < 2) { free(s); LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_LYS, actual, "must"); YYABORT; } YANG_ADDELEM(((struct lys_node_inout *)actual)->must, ((struct lys_node_inout *)actual)->must_size, "musts"); break; case EXTENSION_INSTANCE: /* must is already allocated */ break; default: free(s); LOGINT(trg->ctx); YYABORT; } ((struct lys_restr *)actual)->expr = transform_schema2json(trg, s); free(s); if (!((struct lys_restr *)actual)->expr) { YYABORT; } s = NULL; actual_type = MUST_KEYWORD; } break; case 552: { actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; } break; case 555: { backup_type = actual_type; actual_type = UNIQUE_KEYWORD; } break; case 559: { backup_type = actual_type; actual_type = KEY_KEYWORD; } break; case 561: { s = strdup(yyget_text(scanner)); if (!s) { LOGMEM(trg->ctx); YYABORT; } } break; case 564: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_range(trg->ctx, actual, s, is_ext_instance))) { YYABORT; } actual_type = RANGE_KEYWORD; s = NULL; } break; case 565: { if (s) { s = ly_realloc(s,strlen(s) + yyget_leng(scanner) + 2); if (!s) { LOGMEM(trg->ctx); YYABORT; } strcat(s,"/"); strcat(s, yyget_text(scanner)); } else { s = malloc(yyget_leng(scanner) + 2); if (!s) { LOGMEM(trg->ctx); YYABORT; } s[0]='/'; memcpy(s + 1, yyget_text(scanner), yyget_leng(scanner) + 1); } } break; case 569: { if (s) { s = ly_realloc(s,strlen(s) + yyget_leng(scanner) + 1); if (!s) { LOGMEM(trg->ctx); YYABORT; } strcat(s, yyget_text(scanner)); } else { s = strdup(yyget_text(scanner)); if (!s) { LOGMEM(trg->ctx); YYABORT; } } } break; case 571: { tmp_s = yyget_text(scanner); } break; case 572: { s = strdup(tmp_s); if (!s) { LOGMEM(trg->ctx); YYABORT; } s[strlen(s) - 1] = '\0'; 
} break; case 573: { tmp_s = yyget_text(scanner); } break; case 574: { s = strdup(tmp_s); if (!s) { LOGMEM(trg->ctx); YYABORT; } s[strlen(s) - 1] = '\0'; } break; case 598: { /* convert it to uint32_t */ unsigned long val; val = strtoul(yyget_text(scanner), NULL, 10); if (val > UINT32_MAX) { LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Converted number is very long."); YYABORT; } (yyval.uint) = (uint32_t) val; } break; case 599: { (yyval.uint) = 0; } break; case 600: { (yyval.uint) = (yyvsp[0].uint); } break; case 601: { (yyval.i) = 0; } break; case 602: { /* convert it to int32_t */ int64_t val; val = strtoll(yyget_text(scanner), NULL, 10); if (val < INT32_MIN || val > INT32_MAX) { LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "The number is not in the correct range (INT32_MIN..INT32_MAX): \"%d\"",val); YYABORT; } (yyval.i) = (int32_t) val; } break; case 608: { if (lyp_check_identifier(trg->ctx, s, LY_IDENT_SIMPLE, trg, NULL)) { free(s); YYABORT; } } break; case 613: { char *tmp; if ((tmp = strchr(s, ':'))) { *tmp = '\0'; /* check prefix */ if (lyp_check_identifier(trg->ctx, s, LY_IDENT_SIMPLE, trg, NULL)) { free(s); YYABORT; } /* check identifier */ if (lyp_check_identifier(trg->ctx, tmp + 1, LY_IDENT_SIMPLE, trg, NULL)) { free(s); YYABORT; } *tmp = ':'; } else { /* check identifier */ if (lyp_check_identifier(trg->ctx, s, LY_IDENT_SIMPLE, trg, NULL)) { free(s); YYABORT; } } } break; case 614: { s = (yyvsp[-1].str); } break; case 615: { s = (yyvsp[-3].str); } break; case 616: { actual_type = backup_type; backup_type = NODE; (yyval.str) = s; s = NULL; } break; case 617: { actual_type = backup_type; backup_type = NODE; } break; case 618: { (yyval.str) = s; s = NULL; } break; case 622: { actual_type = (yyvsp[-1].backup_token).token; actual = (yyvsp[-1].backup_token).actual; } break; case 623: { (yyval.backup_token).token = actual_type; (yyval.backup_token).actual = actual; if (!(actual = yang_read_ext(trg, (actual) ? 
actual : trg, (yyvsp[-1].str), s, actual_type, backup_type, is_ext_instance))) { YYABORT; } s = NULL; actual_type = EXTENSION_INSTANCE; } break; case 624: { (yyval.str) = s; s = NULL; } break; case 639: { struct yang_ext_substmt *substmt = ((struct lys_ext_instance *)actual)->parent; int32_t length = 0, old_length = 0; char *tmp_value; if (!substmt) { substmt = calloc(1, sizeof *substmt); if (!substmt) { LOGMEM(trg->ctx); YYABORT; } ((struct lys_ext_instance *)actual)->parent = substmt; } length = strlen((yyvsp[-2].str)); old_length = (substmt->ext_substmt) ? strlen(substmt->ext_substmt) + 2 : 2; tmp_value = realloc(substmt->ext_substmt, old_length + length + 1); if (!tmp_value) { LOGMEM(trg->ctx); YYABORT; } substmt->ext_substmt = tmp_value; tmp_value += old_length - 2; memcpy(tmp_value, (yyvsp[-2].str), length); tmp_value[length] = ' '; tmp_value[length + 1] = '\0'; tmp_value[length + 2] = '\0'; } break; case 640: { struct yang_ext_substmt *substmt = ((struct lys_ext_instance *)actual)->parent; int32_t length; char *tmp_value, **array; int i = 0; if (!substmt) { substmt = calloc(1, sizeof *substmt); if (!substmt) { LOGMEM(trg->ctx); YYABORT; } ((struct lys_ext_instance *)actual)->parent = substmt; } length = strlen((yyvsp[-2].str)); if (!substmt->ext_modules) { array = malloc(2 * sizeof *substmt->ext_modules); } else { for (i = 0; substmt->ext_modules[i]; ++i); array = realloc(substmt->ext_modules, (i + 2) * sizeof *substmt->ext_modules); } if (!array) { LOGMEM(trg->ctx); YYABORT; } substmt->ext_modules = array; array[i + 1] = NULL; tmp_value = malloc(length + 2); if (!tmp_value) { LOGMEM(trg->ctx); YYABORT; } array[i] = tmp_value; memcpy(tmp_value, (yyvsp[-2].str), length); tmp_value[length] = '\0'; tmp_value[length + 1] = '\0'; } break; case 643: { (yyval.str) = yyget_text(scanner); } break; case 644: { (yyval.str) = yyget_text(scanner); } break; case 656: { s = strdup(yyget_text(scanner)); if (!s) { LOGMEM(trg->ctx); YYABORT; } } break; case 749: { s = 
strdup(yyget_text(scanner)); if (!s) { LOGMEM(trg->ctx); YYABORT; } } break; case 750: { s = strdup(yyget_text(scanner)); if (!s) { LOGMEM(trg->ctx); YYABORT; } } break; case 751: { struct lys_type **type; type = (struct lys_type **)yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, "type", LY_STMT_TYPE); if (!type) { YYABORT; } /* allocate type structure */ (*type) = calloc(1, sizeof **type); if (!*type) { LOGMEM(trg->ctx); YYABORT; } /* HACK for unres */ (*type)->parent = (struct lys_tpdf *)ext_instance; (yyval.v) = actual = *type; is_ext_instance = 0; } break; case 752: { struct lys_tpdf **tpdf; tpdf = (struct lys_tpdf **)yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, "typedef", LY_STMT_TYPEDEF); if (!tpdf) { YYABORT; } /* allocate typedef structure */ (*tpdf) = calloc(1, sizeof **tpdf); if (!*tpdf) { LOGMEM(trg->ctx); YYABORT; } (yyval.v) = actual = *tpdf; is_ext_instance = 0; } break; case 753: { struct lys_iffeature **iffeature; iffeature = (struct lys_iffeature **)yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, "if-feature", LY_STMT_IFFEATURE); if (!iffeature) { YYABORT; } /* allocate typedef structure */ (*iffeature) = calloc(1, sizeof **iffeature); if (!*iffeature) { LOGMEM(trg->ctx); YYABORT; } (yyval.v) = actual = *iffeature; } break; case 754: { struct lys_restr **restr; LY_STMT stmt; s = yyget_text(scanner); if (!strcmp(s, "must")) { stmt = LY_STMT_MUST; } else if (!strcmp(s, "pattern")) { stmt = LY_STMT_PATTERN; } else if (!strcmp(s, "range")) { stmt = LY_STMT_RANGE; } else { stmt = LY_STMT_LENGTH; } restr = (struct lys_restr **)yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, s, stmt); if (!restr) { YYABORT; } /* allocate structure for must */ (*restr) = calloc(1, sizeof(struct lys_restr)); if (!*restr) { LOGMEM(trg->ctx); YYABORT; } (yyval.v) = actual = *restr; s = NULL; } break; case 755: { actual = yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, "when", 
LY_STMT_WHEN); if (!actual) { YYABORT; } (yyval.v) = actual; } break; case 756: { struct lys_revision **rev; int i; rev = (struct lys_revision **)yang_getplace_for_extcomplex_struct(ext_instance, &i, ext_name, "revision", LY_STMT_REVISION); if (!rev) { YYABORT; } rev[i] = calloc(1, sizeof **rev); if (!rev[i]) { LOGMEM(trg->ctx); YYABORT; } actual = rev[i]; (yyval.revisions).revision = rev; (yyval.revisions).index = i; } break; case 757: { LY_STMT stmt; s = yyget_text(scanner); if (!strcmp(s, "action")) { stmt = LY_STMT_ACTION; } else if (!strcmp(s, "anydata")) { stmt = LY_STMT_ANYDATA; } else if (!strcmp(s, "anyxml")) { stmt = LY_STMT_ANYXML; } else if (!strcmp(s, "case")) { stmt = LY_STMT_CASE; } else if (!strcmp(s, "choice")) { stmt = LY_STMT_CHOICE; } else if (!strcmp(s, "container")) { stmt = LY_STMT_CONTAINER; } else if (!strcmp(s, "grouping")) { stmt = LY_STMT_GROUPING; } else if (!strcmp(s, "input")) { stmt = LY_STMT_INPUT; } else if (!strcmp(s, "leaf")) { stmt = LY_STMT_LEAF; } else if (!strcmp(s, "leaf-list")) { stmt = LY_STMT_LEAFLIST; } else if (!strcmp(s, "list")) { stmt = LY_STMT_LIST; } else if (!strcmp(s, "notification")) { stmt = LY_STMT_NOTIFICATION; } else if (!strcmp(s, "output")) { stmt = LY_STMT_OUTPUT; } else { stmt = LY_STMT_USES; } if (yang_extcomplex_node(ext_instance, ext_name, s, *param->node, stmt)) { YYABORT; } actual = NULL; s = NULL; is_ext_instance = 0; } break; case 758: { LOGERR(trg->ctx, ly_errno, "Extension's substatement \"%s\" not supported.", yyget_text(scanner)); } break; case 790: { actual_type = EXTENSION_INSTANCE; actual = ext_instance; if (!is_ext_instance) { LOGVAL(trg->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, yyget_text(scanner)); YYABORT; } (yyval.i) = 0; } break; case 792: { if (yang_read_extcomplex_str(trg, ext_instance, "prefix", ext_name, s, 0, LY_STMT_PREFIX)) { YYABORT; } } break; case 793: { if (yang_read_extcomplex_str(trg, ext_instance, "description", ext_name, s, 0, LY_STMT_DESCRIPTION)) { YYABORT; } } break; 
case 794: { if (yang_read_extcomplex_str(trg, ext_instance, "reference", ext_name, s, 0, LY_STMT_REFERENCE)) { YYABORT; } } break; case 795: { if (yang_read_extcomplex_str(trg, ext_instance, "units", ext_name, s, 0, LY_STMT_UNITS)) { YYABORT; } } break; case 796: { if (yang_read_extcomplex_str(trg, ext_instance, "base", ext_name, s, 0, LY_STMT_BASE)) { YYABORT; } } break; case 797: { if (yang_read_extcomplex_str(trg, ext_instance, "contact", ext_name, s, 0, LY_STMT_CONTACT)) { YYABORT; } } break; case 798: { if (yang_read_extcomplex_str(trg, ext_instance, "default", ext_name, s, 0, LY_STMT_DEFAULT)) { YYABORT; } } break; case 799: { if (yang_read_extcomplex_str(trg, ext_instance, "error-message", ext_name, s, 0, LY_STMT_ERRMSG)) { YYABORT; } } break; case 800: { if (yang_read_extcomplex_str(trg, ext_instance, "error-app-tag", ext_name, s, 0, LY_STMT_ERRTAG)) { YYABORT; } } break; case 801: { if (yang_read_extcomplex_str(trg, ext_instance, "key", ext_name, s, 0, LY_STMT_KEY)) { YYABORT; } } break; case 802: { if (yang_read_extcomplex_str(trg, ext_instance, "namespace", ext_name, s, 0, LY_STMT_NAMESPACE)) { YYABORT; } } break; case 803: { if (yang_read_extcomplex_str(trg, ext_instance, "organization", ext_name, s, 0, LY_STMT_ORGANIZATION)) { YYABORT; } } break; case 804: { if (yang_read_extcomplex_str(trg, ext_instance, "path", ext_name, s, 0, LY_STMT_PATH)) { YYABORT; } } break; case 805: { if (yang_read_extcomplex_str(trg, ext_instance, "presence", ext_name, s, 0, LY_STMT_PRESENCE)) { YYABORT; } } break; case 806: { if (yang_read_extcomplex_str(trg, ext_instance, "revision-date", ext_name, s, 0, LY_STMT_REVISIONDATE)) { YYABORT; } } break; case 807: { struct lys_type *type = (yyvsp[-2].v); if (yang_fill_type(trg, type, (struct yang_type *)type->der, ext_instance, param->unres)) { yang_type_free(trg->ctx, type); YYABORT; } if (unres_schema_add_node(trg, param->unres, type, UNRES_TYPE_DER_EXT, NULL) == -1) { yang_type_free(trg->ctx, type); YYABORT; } actual = 
ext_instance; is_ext_instance = 1; } break; case 808: { struct lys_tpdf *tpdf = (yyvsp[-2].v); if (yang_fill_type(trg, &tpdf->type, (struct yang_type *)tpdf->type.der, tpdf, param->unres)) { yang_type_free(trg->ctx, &tpdf->type); } if (yang_check_ext_instance(trg, &tpdf->ext, tpdf->ext_size, tpdf, param->unres)) { YYABORT; } if (unres_schema_add_node(trg, param->unres, &tpdf->type, UNRES_TYPE_DER_TPDF, (struct lys_node *)ext_instance) == -1) { yang_type_free(trg->ctx, &tpdf->type); YYABORT; } /* check default value*/ if (unres_schema_add_node(trg, param->unres, &tpdf->type, UNRES_TYPE_DFLT, (struct lys_node *)(&tpdf->dflt)) == -1) { YYABORT; } actual = ext_instance; is_ext_instance = 1; } break; case 809: { if (yang_fill_extcomplex_flags(ext_instance, ext_name, "status", LY_STMT_STATUS, (yyvsp[0].i), LYS_STATUS_MASK)) { YYABORT; } } break; case 810: { if (yang_fill_extcomplex_flags(ext_instance, ext_name, "config", LY_STMT_CONFIG, (yyvsp[0].i), LYS_CONFIG_MASK)) { YYABORT; } } break; case 811: { if (yang_fill_extcomplex_flags(ext_instance, ext_name, "mandatory", LY_STMT_MANDATORY, (yyvsp[0].i), LYS_MAND_MASK)) { YYABORT; } } break; case 812: { if ((yyvsp[-1].i) & LYS_ORDERED_MASK) { LOGVAL(trg->ctx, LYE_TOOMANY, LY_VLOG_NONE, NULL, "ordered by", ext_name); YYABORT; } if ((yyvsp[0].i) & LYS_USERORDERED) { if (yang_fill_extcomplex_flags(ext_instance, ext_name, "ordered-by", LY_STMT_ORDEREDBY, (yyvsp[0].i), LYS_USERORDERED)) { YYABORT; } } (yyvsp[-1].i) |= (yyvsp[0].i); (yyval.i) = (yyvsp[-1].i); } break; case 813: { if (yang_fill_extcomplex_uint8(ext_instance, ext_name, "require-instance", LY_STMT_REQINSTANCE, (yyvsp[0].i))) { YYABORT; } } break; case 814: { if (yang_fill_extcomplex_uint8(ext_instance, ext_name, "modifier", LY_STMT_MODIFIER, 0)) { YYABORT; } } break; case 815: { /* range check */ if ((yyvsp[0].uint) < 1 || (yyvsp[0].uint) > 18) { LOGVAL(trg->ctx, LYE_SPEC, LY_VLOG_NONE, NULL, "Invalid value \"%d\" of \"%s\".", (yyvsp[0].uint), "fraction-digits"); 
YYABORT; } if (yang_fill_extcomplex_uint8(ext_instance, ext_name, "fraction-digits", LY_STMT_DIGITS, (yyvsp[0].uint))) { YYABORT; } } break; case 816: { uint32_t **val; val = (uint32_t **)yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, "min-elements", LY_STMT_MIN); if (!val) { YYABORT; } /* store the value */ *val = malloc(sizeof(uint32_t)); if (!*val) { LOGMEM(trg->ctx); YYABORT; } **val = (yyvsp[0].uint); } break; case 817: { uint32_t **val; val = (uint32_t **)yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, "max-elements", LY_STMT_MAX); if (!val) { YYABORT; } /* store the value */ *val = malloc(sizeof(uint32_t)); if (!*val) { LOGMEM(trg->ctx); YYABORT; } **val = (yyvsp[0].uint); } break; case 818: { uint32_t **val; val = (uint32_t **)yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, "position", LY_STMT_POSITION); if (!val) { YYABORT; } /* store the value */ *val = malloc(sizeof(uint32_t)); if (!*val) { LOGMEM(trg->ctx); YYABORT; } **val = (yyvsp[0].uint); } break; case 819: { int32_t **val; val = (int32_t **)yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, "value", LY_STMT_VALUE); if (!val) { YYABORT; } /* store the value */ *val = malloc(sizeof(int32_t)); if (!*val) { LOGMEM(trg->ctx); YYABORT; } **val = (yyvsp[0].i); } break; case 820: { struct lys_unique **unique; int rc; unique = (struct lys_unique **)yang_getplace_for_extcomplex_struct(ext_instance, NULL, ext_name, "unique", LY_STMT_UNIQUE); if (!unique) { YYABORT; } *unique = calloc(1, sizeof(struct lys_unique)); if (!*unique) { LOGMEM(trg->ctx); YYABORT; } rc = yang_fill_unique(trg, (struct lys_node_list *)ext_instance, *unique, s, param->unres); free(s); s = NULL; if (rc) { YYABORT; } } break; case 821: { struct lys_iffeature *iffeature; iffeature = (yyvsp[-2].v); s = (char *)iffeature->features; iffeature->features = NULL; if (yang_fill_iffeature(trg, iffeature, ext_instance, s, param->unres, 0)) { YYABORT; } if 
(yang_check_ext_instance(trg, &iffeature->ext, iffeature->ext_size, iffeature, param->unres)) { YYABORT; } s = NULL; actual = ext_instance; } break; case 823: { if (yang_check_ext_instance(trg, &((struct lys_restr *)(yyvsp[-2].v))->ext, ((struct lys_restr *)(yyvsp[-2].v))->ext_size, (yyvsp[-2].v), param->unres)) { YYABORT; } actual = ext_instance; } break; case 824: { if (yang_check_ext_instance(trg, &(*(struct lys_when **)(yyvsp[-2].v))->ext, (*(struct lys_when **)(yyvsp[-2].v))->ext_size, *(struct lys_when **)(yyvsp[-2].v), param->unres)) { YYABORT; } actual = ext_instance; } break; case 825: { int i; for (i = 0; i < (yyvsp[-2].revisions).index; ++i) { if (!strcmp((yyvsp[-2].revisions).revision[i]->date, (yyvsp[-2].revisions).revision[(yyvsp[-2].revisions).index]->date)) { LOGWRN(trg->ctx, "Module's revisions are not unique (%s).", (yyvsp[-2].revisions).revision[i]->date); break; } } if (yang_check_ext_instance(trg, &(yyvsp[-2].revisions).revision[(yyvsp[-2].revisions).index]->ext, (yyvsp[-2].revisions).revision[(yyvsp[-2].revisions).index]->ext_size, &(yyvsp[-2].revisions).revision[(yyvsp[-2].revisions).index], param->unres)) { YYABORT; } actual = ext_instance; } break; case 826: { actual = ext_instance; is_ext_instance = 1; } break; default: break; } /* User semantic actions sometimes alter yychar, and that requires that yytoken be updated with the new translation. We take the approach of translating immediately before every use of yytoken. One alternative is translating here after every semantic action, but that translation would be missed if the semantic action invokes YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an incorrect destructor might then be invoked immediately. In the case of YYERROR or YYBACKUP, subsequent parser actions might lead to an incorrect destructor call or verbose syntax error message before the lookahead is translated. 
*/ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; *++yylsp = yyloc; /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ { const int yylhs = yyr1[yyn] - YYNTOKENS; const int yyi = yypgoto[yylhs] + *yyssp; yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyssp ? yytable[yyi] : yydefgoto[yylhs]); } goto yynewstate; /*--------------------------------------. | yyerrlab -- here on detecting error. | `--------------------------------------*/ yyerrlab: /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #if ! YYERROR_VERBOSE yyerror (&yylloc, scanner, param, YY_("syntax error")); #else # define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ yyssp, yytoken) { char const *yymsgp = YY_("syntax error"); int yysyntax_error_status; yysyntax_error_status = YYSYNTAX_ERROR; if (yysyntax_error_status == 0) yymsgp = yymsg; else if (yysyntax_error_status == 1) { if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc); if (!yymsg) { yymsg = yymsgbuf; yymsg_alloc = sizeof yymsgbuf; yysyntax_error_status = 2; } else { yysyntax_error_status = YYSYNTAX_ERROR; yymsgp = yymsg; } } yyerror (&yylloc, scanner, param, yymsgp); if (yysyntax_error_status == 2) goto yyexhaustedlab; } # undef YYSYNTAX_ERROR #endif } yyerror_range[1] = yylloc; if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ if (yychar <= YYEOF) { /* Return failure if at end of input. 
*/ if (yychar == YYEOF) YYABORT; } else { yydestruct ("Error: discarding", yytoken, &yylval, &yylloc, scanner, param); yychar = YYEMPTY; } } /* Else will try to reuse lookahead token after shifting the error token. */ goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers like GCC when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. */ if (/*CONSTCOND*/ 0) goto yyerrorlab; /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); yystate = *yyssp; goto yyerrlab1; /*-------------------------------------------------------------. | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { yyn = yytable[yyn]; if (0 < yyn) break; } } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) YYABORT; yyerror_range[1] = *yylsp; yydestruct ("Error: popping", yystos[yystate], yyvsp, yylsp, scanner, param); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); } YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END yyerror_range[2] = yylloc; /* Using YYLLOC is tempting, but would change the location of the lookahead. YYLOC is available though. */ YYLLOC_DEFAULT (yyloc, yyerror_range, 2); *++yylsp = yyloc; /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. 
| `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; #if !defined yyoverflow || YYERROR_VERBOSE /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (&yylloc, scanner, param, YY_("memory exhausted")); yyresult = 2; /* Fall through. */ #endif yyreturn: if (yychar != YYEMPTY) { /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = YYTRANSLATE (yychar); yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval, &yylloc, scanner, param); } /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. */ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", yystos[*yyssp], yyvsp, yylsp, scanner, param); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif #if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif return yyresult; } void yyerror(YYLTYPE *yylloc, void *scanner, struct yang_parameter *param, ...) { free(*param->value); *param->value = NULL; if (yylloc->first_line != -1) { if (*param->data_node && (*param->data_node) == (*param->actual_node)) { LOGVAL(param->module->ctx, LYE_INSTMT, LY_VLOG_LYS, *param->data_node, yyget_text(scanner)); } else { LOGVAL(param->module->ctx, LYE_INSTMT, LY_VLOG_NONE, NULL, yyget_text(scanner)); } } }
./CrossVul/dataset_final_sorted/CWE-20/c/good_1363_0
crossvul-cpp_data_bad_5732_0
#undef DEBUG /* * ARM performance counter support. * * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com> * * This code is based on the sparc64 perf event code, which is in turn based * on the x86 code. Callchain code is based on the ARM OProfile backtrace * code. */ #define pr_fmt(fmt) "hw perfevents: " fmt #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/uaccess.h> #include <asm/irq_regs.h> #include <asm/pmu.h> #include <asm/stacktrace.h> static int armpmu_map_cache_event(const unsigned (*cache_map) [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX], u64 config) { unsigned int cache_type, cache_op, cache_result, ret; cache_type = (config >> 0) & 0xff; if (cache_type >= PERF_COUNT_HW_CACHE_MAX) return -EINVAL; cache_op = (config >> 8) & 0xff; if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) return -EINVAL; cache_result = (config >> 16) & 0xff; if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; if (ret == CACHE_OP_UNSUPPORTED) return -ENOENT; return ret; } static int armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) { int mapping = (*event_map)[config]; return mapping == HW_OP_UNSUPPORTED ? 
-ENOENT : mapping; } static int armpmu_map_raw_event(u32 raw_event_mask, u64 config) { return (int)(config & raw_event_mask); } int armpmu_map_event(struct perf_event *event, const unsigned (*event_map)[PERF_COUNT_HW_MAX], const unsigned (*cache_map) [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX], u32 raw_event_mask) { u64 config = event->attr.config; switch (event->attr.type) { case PERF_TYPE_HARDWARE: return armpmu_map_hw_event(event_map, config); case PERF_TYPE_HW_CACHE: return armpmu_map_cache_event(cache_map, config); case PERF_TYPE_RAW: return armpmu_map_raw_event(raw_event_mask, config); } return -ENOENT; } int armpmu_event_set_period(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; s64 left = local64_read(&hwc->period_left); s64 period = hwc->sample_period; int ret = 0; /* The period may have been changed by PERF_EVENT_IOC_PERIOD */ if (unlikely(period != hwc->last_period)) left = period - (hwc->last_period - left); if (unlikely(left <= -period)) { left = period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (unlikely(left <= 0)) { left += period; local64_set(&hwc->period_left, left); hwc->last_period = period; ret = 1; } if (left > (s64)armpmu->max_period) left = armpmu->max_period; local64_set(&hwc->prev_count, (u64)-left); armpmu->write_counter(event, (u64)(-left) & 0xffffffff); perf_event_update_userpage(event); return ret; } u64 armpmu_event_update(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; u64 delta, prev_raw_count, new_raw_count; again: prev_raw_count = local64_read(&hwc->prev_count); new_raw_count = armpmu->read_counter(event); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) goto again; delta = (new_raw_count - prev_raw_count) & armpmu->max_period; local64_add(delta, &event->count); 
local64_sub(delta, &hwc->period_left); return new_raw_count; } static void armpmu_read(struct perf_event *event) { armpmu_event_update(event); } static void armpmu_stop(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; /* * ARM pmu always has to update the counter, so ignore * PERF_EF_UPDATE, see comments in armpmu_start(). */ if (!(hwc->state & PERF_HES_STOPPED)) { armpmu->disable(event); armpmu_event_update(event); hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; } } static void armpmu_start(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; /* * ARM pmu always has to reprogram the period, so ignore * PERF_EF_RELOAD, see the comment below. */ if (flags & PERF_EF_RELOAD) WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); hwc->state = 0; /* * Set the period again. Some counters can't be stopped, so when we * were stopped we simply disabled the IRQ source and the counter * may have been left counting. If we don't do this step then we may * get an interrupt too soon or *way* too late if the overflow has * happened since disabling. */ armpmu_event_set_period(event); armpmu->enable(event); } static void armpmu_del(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu_hw_events *hw_events = armpmu->get_hw_events(); struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; armpmu_stop(event, PERF_EF_UPDATE); hw_events->events[idx] = NULL; clear_bit(idx, hw_events->used_mask); perf_event_update_userpage(event); } static int armpmu_add(struct perf_event *event, int flags) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu_hw_events *hw_events = armpmu->get_hw_events(); struct hw_perf_event *hwc = &event->hw; int idx; int err = 0; perf_pmu_disable(event->pmu); /* If we don't have a space for the counter then finish early. 
*/ idx = armpmu->get_event_idx(hw_events, event); if (idx < 0) { err = idx; goto out; } /* * If there is an event in the counter we are going to use then make * sure it is disabled. */ event->hw.idx = idx; armpmu->disable(event); hw_events->events[idx] = event; hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; if (flags & PERF_EF_START) armpmu_start(event, PERF_EF_RELOAD); /* Propagate our changes to the userspace mapping. */ perf_event_update_userpage(event); out: perf_pmu_enable(event->pmu); return err; } static int validate_event(struct pmu_hw_events *hw_events, struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct pmu *leader_pmu = event->group_leader->pmu; if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) return 1; if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; return armpmu->get_event_idx(hw_events, event) >= 0; } static int validate_group(struct perf_event *event) { struct perf_event *sibling, *leader = event->group_leader; struct pmu_hw_events fake_pmu; DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); /* * Initialise the fake PMU. We only need to populate the * used_mask for the purposes of validation. 
*/ memset(fake_used_mask, 0, sizeof(fake_used_mask)); fake_pmu.used_mask = fake_used_mask; if (!validate_event(&fake_pmu, leader)) return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { if (!validate_event(&fake_pmu, sibling)) return -EINVAL; } if (!validate_event(&fake_pmu, event)) return -EINVAL; return 0; } static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) { struct arm_pmu *armpmu = (struct arm_pmu *) dev; struct platform_device *plat_device = armpmu->plat_device; struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev); if (plat && plat->handle_irq) return plat->handle_irq(irq, dev, armpmu->handle_irq); else return armpmu->handle_irq(irq, dev); } static void armpmu_release_hardware(struct arm_pmu *armpmu) { armpmu->free_irq(armpmu); pm_runtime_put_sync(&armpmu->plat_device->dev); } static int armpmu_reserve_hardware(struct arm_pmu *armpmu) { int err; struct platform_device *pmu_device = armpmu->plat_device; if (!pmu_device) return -ENODEV; pm_runtime_get_sync(&pmu_device->dev); err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); if (err) { armpmu_release_hardware(armpmu); return err; } return 0; } static void hw_perf_event_destroy(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); atomic_t *active_events = &armpmu->active_events; struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { armpmu_release_hardware(armpmu); mutex_unlock(pmu_reserve_mutex); } } static int event_requires_mode_exclusion(struct perf_event_attr *attr) { return attr->exclude_idle || attr->exclude_user || attr->exclude_kernel || attr->exclude_hv; } static int __hw_perf_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; int mapping; mapping = armpmu->map_event(event); if (mapping < 0) { pr_debug("event %x:%llx not supported\n", event->attr.type, event->attr.config); return 
mapping; } /* * We don't assign an index until we actually place the event onto * hardware. Use -1 to signify that we haven't decided where to put it * yet. For SMP systems, each core has it's own PMU so we can't do any * clever allocation or constraints checking at this point. */ hwc->idx = -1; hwc->config_base = 0; hwc->config = 0; hwc->event_base = 0; /* * Check whether we need to exclude the counter from certain modes. */ if ((!armpmu->set_event_filter || armpmu->set_event_filter(hwc, &event->attr)) && event_requires_mode_exclusion(&event->attr)) { pr_debug("ARM performance counters do not support " "mode exclusion\n"); return -EOPNOTSUPP; } /* * Store the event encoding into the config_base field. */ hwc->config_base |= (unsigned long)mapping; if (!hwc->sample_period) { /* * For non-sampling runs, limit the sample_period to half * of the counter width. That way, the new counter value * is far less likely to overtake the previous one unless * you have some serious IRQ latency issues. 
*/ hwc->sample_period = armpmu->max_period >> 1; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } if (event->group_leader != event) { if (validate_group(event) != 0) return -EINVAL; } return 0; } static int armpmu_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); int err = 0; atomic_t *active_events = &armpmu->active_events; /* does not support taken branch sampling */ if (has_branch_stack(event)) return -EOPNOTSUPP; if (armpmu->map_event(event) == -ENOENT) return -ENOENT; event->destroy = hw_perf_event_destroy; if (!atomic_inc_not_zero(active_events)) { mutex_lock(&armpmu->reserve_mutex); if (atomic_read(active_events) == 0) err = armpmu_reserve_hardware(armpmu); if (!err) atomic_inc(active_events); mutex_unlock(&armpmu->reserve_mutex); } if (err) return err; err = __hw_perf_event_init(event); if (err) hw_perf_event_destroy(event); return err; } static void armpmu_enable(struct pmu *pmu) { struct arm_pmu *armpmu = to_arm_pmu(pmu); struct pmu_hw_events *hw_events = armpmu->get_hw_events(); int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); if (enabled) armpmu->start(armpmu); } static void armpmu_disable(struct pmu *pmu) { struct arm_pmu *armpmu = to_arm_pmu(pmu); armpmu->stop(armpmu); } #ifdef CONFIG_PM_RUNTIME static int armpmu_runtime_resume(struct device *dev) { struct arm_pmu_platdata *plat = dev_get_platdata(dev); if (plat && plat->runtime_resume) return plat->runtime_resume(dev); return 0; } static int armpmu_runtime_suspend(struct device *dev) { struct arm_pmu_platdata *plat = dev_get_platdata(dev); if (plat && plat->runtime_suspend) return plat->runtime_suspend(dev); return 0; } #endif const struct dev_pm_ops armpmu_dev_pm_ops = { SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL) }; static void armpmu_init(struct arm_pmu *armpmu) { atomic_set(&armpmu->active_events, 0); mutex_init(&armpmu->reserve_mutex); armpmu->pmu = (struct pmu) { 
.pmu_enable = armpmu_enable, .pmu_disable = armpmu_disable, .event_init = armpmu_event_init, .add = armpmu_add, .del = armpmu_del, .start = armpmu_start, .stop = armpmu_stop, .read = armpmu_read, }; } int armpmu_register(struct arm_pmu *armpmu, int type) { armpmu_init(armpmu); pm_runtime_enable(&armpmu->plat_device->dev); pr_info("enabled with %s PMU driver, %d counters available\n", armpmu->name, armpmu->num_events); return perf_pmu_register(&armpmu->pmu, armpmu->name, type); } /* * Callchain handling code. */ /* * The registers we're interested in are at the end of the variable * length saved register structure. The fp points at the end of this * structure so the address of this struct is: * (struct frame_tail *)(xxx->fp)-1 * * This code has been adapted from the ARM OProfile support. */ struct frame_tail { struct frame_tail __user *fp; unsigned long sp; unsigned long lr; } __attribute__((packed)); /* * Get the return address for a single stackframe and return a pointer to the * next frame tail. */ static struct frame_tail __user * user_backtrace(struct frame_tail __user *tail, struct perf_callchain_entry *entry) { struct frame_tail buftail; /* Also check accessibility of one struct frame_tail beyond */ if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) return NULL; if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) return NULL; perf_callchain_store(entry, buftail.lr); /* * Frame pointers should strictly progress back up the stack * (towards higher addresses). 
*/ if (tail + 1 >= buftail.fp) return NULL; return buftail.fp - 1; } void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct frame_tail __user *tail; if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } perf_callchain_store(entry, regs->ARM_pc); tail = (struct frame_tail __user *)regs->ARM_fp - 1; while ((entry->nr < PERF_MAX_STACK_DEPTH) && tail && !((unsigned long)tail & 0x3)) tail = user_backtrace(tail, entry); } /* * Gets called by walk_stackframe() for every stackframe. This will be called * whist unwinding the stackframe and is like a subroutine return so we use * the PC. */ static int callchain_trace(struct stackframe *fr, void *data) { struct perf_callchain_entry *entry = data; perf_callchain_store(entry, fr->pc); return 0; } void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct stackframe fr; if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { /* We don't support guest os callchain now */ return; } fr.fp = regs->ARM_fp; fr.sp = regs->ARM_sp; fr.lr = regs->ARM_lr; fr.pc = regs->ARM_pc; walk_stackframe(&fr, callchain_trace, entry); } unsigned long perf_instruction_pointer(struct pt_regs *regs) { if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) return perf_guest_cbs->get_guest_ip(); return instruction_pointer(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) { int misc = 0; if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { if (perf_guest_cbs->is_user_mode()) misc |= PERF_RECORD_MISC_GUEST_USER; else misc |= PERF_RECORD_MISC_GUEST_KERNEL; } else { if (user_mode(regs)) misc |= PERF_RECORD_MISC_USER; else misc |= PERF_RECORD_MISC_KERNEL; } return misc; }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5732_0
crossvul-cpp_data_bad_5604_0
/* * linux/fs/ext3/super.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/inode.c * * Copyright (C) 1991, 1992 Linus Torvalds * * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller (davem@caip.rutgers.edu), 1995 */ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/parser.h> #include <linux/exportfs.h> #include <linux/statfs.h> #include <linux/random.h> #include <linux/mount.h> #include <linux/quotaops.h> #include <linux/seq_file.h> #include <linux/log2.h> #include <linux/cleancache.h> #include <asm/uaccess.h> #define CREATE_TRACE_POINTS #include "ext3.h" #include "xattr.h" #include "acl.h" #include "namei.h" #ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA #else #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_WRITEBACK_DATA #endif static int ext3_load_journal(struct super_block *, struct ext3_super_block *, unsigned long journal_devnum); static int ext3_create_journal(struct super_block *, struct ext3_super_block *, unsigned int); static int ext3_commit_super(struct super_block *sb, struct ext3_super_block *es, int sync); static void ext3_mark_recovery_complete(struct super_block * sb, struct ext3_super_block * es); static void ext3_clear_journal_err(struct super_block * sb, struct ext3_super_block * es); static int ext3_sync_fs(struct super_block *sb, int wait); static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]); static int ext3_remount (struct super_block * sb, int * flags, char * data); static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf); static int ext3_unfreeze(struct super_block *sb); static int ext3_freeze(struct super_block *sb); /* * Wrappers for journal_start/end. 
*/ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) { journal_t *journal; if (sb->s_flags & MS_RDONLY) return ERR_PTR(-EROFS); /* Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. */ journal = EXT3_SB(sb)->s_journal; if (is_journal_aborted(journal)) { ext3_abort(sb, __func__, "Detected aborted journal"); return ERR_PTR(-EROFS); } return journal_start(journal, nblocks); } int __ext3_journal_stop(const char *where, handle_t *handle) { struct super_block *sb; int err; int rc; sb = handle->h_transaction->t_journal->j_private; err = handle->h_err; rc = journal_stop(handle); if (!err) err = rc; if (err) __ext3_std_error(sb, where, err); return err; } void ext3_journal_abort_handle(const char *caller, const char *err_fn, struct buffer_head *bh, handle_t *handle, int err) { char nbuf[16]; const char *errstr = ext3_decode_error(NULL, err, nbuf); if (bh) BUFFER_TRACE(bh, "abort"); if (!handle->h_err) handle->h_err = err; if (is_handle_aborted(handle)) return; printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n", caller, errstr, err_fn); journal_abort_handle(handle); } void ext3_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } /* Deal with the reporting of failure conditions on a filesystem such as * inconsistencies detected or read IO failures. * * On ext2, we can store the error state of the filesystem in the * superblock. That is not possible on ext3, because we may have other * write ordering constraints on the superblock which prevent us from * writing it out straight away; and given that the journal is about to * be aborted, we can't rely on the current, or future, transactions to * write out the superblock safely. 
* * We'll just use the journal_abort() error code to record an error in * the journal instead. On recovery, the journal will complain about * that error until we've noted it down and cleared it. */ static void ext3_handle_error(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; es->s_state |= cpu_to_le16(EXT3_ERROR_FS); if (sb->s_flags & MS_RDONLY) return; if (!test_opt (sb, ERRORS_CONT)) { journal_t *journal = EXT3_SB(sb)->s_journal; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (journal) journal_abort(journal, -EIO); } if (test_opt (sb, ERRORS_RO)) { ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); sb->s_flags |= MS_RDONLY; } ext3_commit_super(sb, es, 1); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs (%s): panic forced after error\n", sb->s_id); } void ext3_error(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n", sb->s_id, function, &vaf); va_end(args); ext3_handle_error(sb); } static const char *ext3_decode_error(struct super_block * sb, int errno, char nbuf[16]) { char *errstr = NULL; switch (errno) { case -EIO: errstr = "IO failure"; break; case -ENOMEM: errstr = "Out of memory"; break; case -EROFS: if (!sb || EXT3_SB(sb)->s_journal->j_flags & JFS_ABORT) errstr = "Journal has aborted"; else errstr = "Readonly filesystem"; break; default: /* If the caller passed in an extra buffer for unknown * errors, textualise them now. Else we just return * NULL. */ if (nbuf) { /* Check for truncated error codes... */ if (snprintf(nbuf, 16, "error %d", -errno) >= 0) errstr = nbuf; } break; } return errstr; } /* __ext3_std_error decodes expected errors from journaling functions * automatically and invokes the appropriate error response. 
*/ void __ext3_std_error (struct super_block * sb, const char * function, int errno) { char nbuf[16]; const char *errstr; /* Special case: if the error is EROFS, and we're not already * inside a transaction, then there's really no point in logging * an error. */ if (errno == -EROFS && journal_current_handle() == NULL && (sb->s_flags & MS_RDONLY)) return; errstr = ext3_decode_error(sb, errno, nbuf); ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr); ext3_handle_error(sb); } /* * ext3_abort is a much stronger failure handler than ext3_error. The * abort function may be used to deal with unrecoverable failures such * as journal IO errors or ENOMEM at a critical moment in log management. * * We unconditionally force the filesystem into an ABORT|READONLY state, * unless the error response on the fs has been set to panic in which * case we take the easy way out and panic immediately. */ void ext3_abort(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); if (test_opt(sb, ERRORS_PANIC)) panic("EXT3-fs: panic from previous error\n"); if (sb->s_flags & MS_RDONLY) return; ext3_msg(sb, KERN_CRIT, "error: remounting filesystem read-only"); EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS; sb->s_flags |= MS_RDONLY; set_opt(EXT3_SB(sb)->s_mount_opt, ABORT); if (EXT3_SB(sb)->s_journal) journal_abort(EXT3_SB(sb)->s_journal, -EIO); } void ext3_warning(struct super_block *sb, const char *function, const char *fmt, ...) 
{ struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n", sb->s_id, function, &vaf); va_end(args); } void ext3_update_dynamic_rev(struct super_block *sb) { struct ext3_super_block *es = EXT3_SB(sb)->s_es; if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV) return; ext3_msg(sb, KERN_WARNING, "warning: updating to rev %d because of " "new feature flag, running e2fsck is recommended", EXT3_DYNAMIC_REV); es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO); es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE); es->s_rev_level = cpu_to_le32(EXT3_DYNAMIC_REV); /* leave es->s_feature_*compat flags alone */ /* es->s_uuid will be set by e2fsck if empty */ /* * The rest of the superblock fields should be zero, and if not it * means they are likely already in use, so leave them alone. We * can leave it up to e2fsck to clean up any inconsistencies there. */ } /* * Open the external journal device */ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) { struct block_device *bdev; char b[BDEVNAME_SIZE]; bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; fail: ext3_msg(sb, "error: failed to open journal device %s: %ld", __bdevname(dev, b), PTR_ERR(bdev)); return NULL; } /* * Release the journal device */ static int ext3_blkdev_put(struct block_device *bdev) { return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int ext3_blkdev_remove(struct ext3_sb_info *sbi) { struct block_device *bdev; int ret = -ENODEV; bdev = sbi->journal_bdev; if (bdev) { ret = ext3_blkdev_put(bdev); sbi->journal_bdev = NULL; } return ret; } static inline struct inode *orphan_list_entry(struct list_head *l) { return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode; } static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi) { struct list_head *l; ext3_msg(sb, KERN_ERR, "error: sb 
orphan head is %d", le32_to_cpu(sbi->s_es->s_last_orphan)); ext3_msg(sb, KERN_ERR, "sb_info orphan list:"); list_for_each(l, &sbi->s_orphan) { struct inode *inode = orphan_list_entry(l); ext3_msg(sb, KERN_ERR, " " "inode %s:%lu at %p: mode %o, nlink %d, next %d\n", inode->i_sb->s_id, inode->i_ino, inode, inode->i_mode, inode->i_nlink, NEXT_ORPHAN(inode)); } } static void ext3_put_super (struct super_block * sb) { struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; int i, err; dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); ext3_xattr_put_super(sb); err = journal_destroy(sbi->s_journal); sbi->s_journal = NULL; if (err < 0) ext3_abort(sb, __func__, "Couldn't clean up the journal"); if (!(sb->s_flags & MS_RDONLY)) { EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); es->s_state = cpu_to_le16(sbi->s_mount_state); BUFFER_TRACE(sbi->s_sbh, "marking dirty"); mark_buffer_dirty(sbi->s_sbh); ext3_commit_super(sb, es, 1); } for (i = 0; i < sbi->s_gdb_count; i++) brelse(sbi->s_group_desc[i]); kfree(sbi->s_group_desc); percpu_counter_destroy(&sbi->s_freeblocks_counter); percpu_counter_destroy(&sbi->s_freeinodes_counter); percpu_counter_destroy(&sbi->s_dirs_counter); brelse(sbi->s_sbh); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) kfree(sbi->s_qf_names[i]); #endif /* Debugging code just in case the in-memory inode orphan list * isn't empty. The on-disk one can be non-empty if we've * detected an error and taken the fs readonly, but the * in-memory list had better be clean by this point. */ if (!list_empty(&sbi->s_orphan)) dump_orphan_list(sb, sbi); J_ASSERT(list_empty(&sbi->s_orphan)); invalidate_bdev(sb->s_bdev); if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) { /* * Invalidate the journal device's buffers. We don't want them * floating about in memory - the physical journal device may * hotswapped, and it breaks the `ro-after' testing code. 
*/ sync_blockdev(sbi->journal_bdev); invalidate_bdev(sbi->journal_bdev); ext3_blkdev_remove(sbi); } sb->s_fs_info = NULL; kfree(sbi->s_blockgroup_lock); kfree(sbi); } static struct kmem_cache *ext3_inode_cachep; /* * Called inside transaction, so use GFP_NOFS */ static struct inode *ext3_alloc_inode(struct super_block *sb) { struct ext3_inode_info *ei; ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS); if (!ei) return NULL; ei->i_block_alloc_info = NULL; ei->vfs_inode.i_version = 1; atomic_set(&ei->i_datasync_tid, 0); atomic_set(&ei->i_sync_tid, 0); return &ei->vfs_inode; } static int ext3_drop_inode(struct inode *inode) { int drop = generic_drop_inode(inode); trace_ext3_drop_inode(inode, drop); return drop; } static void ext3_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(ext3_inode_cachep, EXT3_I(inode)); } static void ext3_destroy_inode(struct inode *inode) { if (!list_empty(&(EXT3_I(inode)->i_orphan))) { printk("EXT3 Inode %p: orphan list check failed!\n", EXT3_I(inode)); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4, EXT3_I(inode), sizeof(struct ext3_inode_info), false); dump_stack(); } call_rcu(&inode->i_rcu, ext3_i_callback); } static void init_once(void *foo) { struct ext3_inode_info *ei = (struct ext3_inode_info *) foo; INIT_LIST_HEAD(&ei->i_orphan); #ifdef CONFIG_EXT3_FS_XATTR init_rwsem(&ei->xattr_sem); #endif mutex_init(&ei->truncate_mutex); inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { ext3_inode_cachep = kmem_cache_create("ext3_inode_cache", sizeof(struct ext3_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (ext3_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. 
*/ rcu_barrier(); kmem_cache_destroy(ext3_inode_cachep); } static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb) { #if defined(CONFIG_QUOTA) struct ext3_sb_info *sbi = EXT3_SB(sb); if (sbi->s_jquota_fmt) { char *fmtname = ""; switch (sbi->s_jquota_fmt) { case QFMT_VFS_OLD: fmtname = "vfsold"; break; case QFMT_VFS_V0: fmtname = "vfsv0"; break; case QFMT_VFS_V1: fmtname = "vfsv1"; break; } seq_printf(seq, ",jqfmt=%s", fmtname); } if (sbi->s_qf_names[USRQUOTA]) seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]); if (sbi->s_qf_names[GRPQUOTA]) seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); if (test_opt(sb, USRQUOTA)) seq_puts(seq, ",usrquota"); if (test_opt(sb, GRPQUOTA)) seq_puts(seq, ",grpquota"); #endif } static char *data_mode_string(unsigned long mode) { switch (mode) { case EXT3_MOUNT_JOURNAL_DATA: return "journal"; case EXT3_MOUNT_ORDERED_DATA: return "ordered"; case EXT3_MOUNT_WRITEBACK_DATA: return "writeback"; } return "unknown"; } /* * Show an option if * - it's set to a non-default value OR * - if the per-sb default is different from the global default */ static int ext3_show_options(struct seq_file *seq, struct dentry *root) { struct super_block *sb = root->d_sb; struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; unsigned long def_mount_opts; def_mount_opts = le32_to_cpu(es->s_default_mount_opts); if (sbi->s_sb_block != 1) seq_printf(seq, ",sb=%lu", sbi->s_sb_block); if (test_opt(sb, MINIX_DF)) seq_puts(seq, ",minixdf"); if (test_opt(sb, GRPID)) seq_puts(seq, ",grpid"); if (!test_opt(sb, GRPID) && (def_mount_opts & EXT3_DEFM_BSDGROUPS)) seq_puts(seq, ",nogrpid"); if (!uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT3_DEF_RESUID)) || le16_to_cpu(es->s_def_resuid) != EXT3_DEF_RESUID) { seq_printf(seq, ",resuid=%u", from_kuid_munged(&init_user_ns, sbi->s_resuid)); } if (!gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT3_DEF_RESGID)) || le16_to_cpu(es->s_def_resgid) != 
EXT3_DEF_RESGID) {
		seq_printf(seq, ",resgid=%u",
			   from_kgid_munged(&init_user_ns, sbi->s_resgid));
	}
	if (test_opt(sb, ERRORS_RO)) {
		int def_errors = le16_to_cpu(es->s_errors);

		/* only emit errors=remount-ro when it differs from the
		 * on-disk default */
		if (def_errors == EXT3_ERRORS_PANIC ||
		    def_errors == EXT3_ERRORS_CONTINUE) {
			seq_puts(seq, ",errors=remount-ro");
		}
	}
	if (test_opt(sb, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (test_opt(sb, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
	if (test_opt(sb, NO_UID32))
		seq_puts(seq, ",nouid32");
	if (test_opt(sb, DEBUG))
		seq_puts(seq, ",debug");
#ifdef CONFIG_EXT3_FS_XATTR
	if (test_opt(sb, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	if (!test_opt(sb, XATTR_USER) &&
	    (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
		seq_puts(seq, ",nouser_xattr");
	}
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
	if (test_opt(sb, POSIX_ACL))
		seq_puts(seq, ",acl");
	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT3_DEFM_ACL))
		seq_puts(seq, ",noacl");
#endif
	if (!test_opt(sb, RESERVATION))
		seq_puts(seq, ",noreservation");
	if (sbi->s_commit_interval) {
		seq_printf(seq, ",commit=%u",
			   (unsigned) (sbi->s_commit_interval / HZ));
	}
	/*
	 * Always display barrier state so it's clear what the status is.
	 */
	seq_puts(seq, ",barrier=");
	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
	seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
	if (test_opt(sb, DATA_ERR_ABORT))
		seq_puts(seq, ",data_err=abort");
	if (test_opt(sb, NOLOAD))
		seq_puts(seq, ",norecovery");
	ext3_show_quota_options(seq, sb);
	return 0;
}

/*
 * NFS export support: map an (inode number, generation) pair from a file
 * handle to an in-core inode, rejecting clearly invalid inode numbers.
 */
static struct inode *ext3_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct inode *inode;

	/* Reserved inodes (below EXT3_FIRST_INO), other than the root,
	 * and inode numbers beyond s_inodes_count are never valid handles. */
	if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/* iget isn't really right if the inode is currently unallocated!!
	 *
	 * ext3_read_inode will return a bad_inode if the inode had been
	 * deleted, so we should be safe.
	 *
	 * Currently we don't know the generation for parent directory, so
	 * a generation of 0 means "accept any"
	 */
	inode = ext3_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		/* generation mismatch: the handle is stale */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}

	return inode;
}

/* Decode an NFS file handle into a dentry (the object itself). */
static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext3_nfs_get_inode);
}

/* Decode an NFS file handle into a dentry for the object's parent. */
static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext3_nfs_get_inode);
}

/*
 * Try to release metadata pages (indirect blocks, directories) which are
 * mapped via the block device. Since these pages could have journal heads
 * which would prevent try_to_free_buffers() from freeing them, we must use
 * jbd layer's try_to_free_buffers() function to release them.
 */
static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
				 gfp_t wait)
{
	journal_t *journal = EXT3_SB(sb)->s_journal;

	WARN_ON(PageChecked(page));
	if (!page_has_buffers(page))
		return 0;
	if (journal)
		return journal_try_to_free_buffers(journal, page,
						   wait & ~__GFP_WAIT);
	return try_to_free_buffers(page);
}

#ifdef CONFIG_QUOTA
/* Map a quota type to its human-readable name / mount-option flag. */
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))

static int ext3_write_dquot(struct dquot *dquot);
static int ext3_acquire_dquot(struct dquot *dquot);
static int ext3_release_dquot(struct dquot *dquot);
static int ext3_mark_dquot_dirty(struct dquot *dquot);
static int ext3_write_info(struct super_block *sb, int type);
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path);
static int ext3_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext3_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);

/* Journaled-quota callbacks: ext3 wraps the generic dquot operations so
 * quota updates go through the journal. */
static const struct dquot_operations ext3_quota_operations = {
	.write_dquot	= ext3_write_dquot,
	.acquire_dquot	= ext3_acquire_dquot,
	.release_dquot	= ext3_release_dquot,
	.mark_dirty	= ext3_mark_dquot_dirty,
	.write_info	= ext3_write_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};

static const struct quotactl_ops ext3_qctl_operations = {
	.quota_on	= ext3_quota_on,
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_info	= dquot_get_dqinfo,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
#endif

/* Superblock operations handed to the VFS for every ext3 mount. */
static const struct super_operations ext3_sops = {
	.alloc_inode	= ext3_alloc_inode,
	.destroy_inode	= ext3_destroy_inode,
	.write_inode	= ext3_write_inode,
	.dirty_inode	= ext3_dirty_inode,
	.drop_inode	= ext3_drop_inode,
	.evict_inode	= ext3_evict_inode,
	.put_super	= ext3_put_super,
	.sync_fs	= ext3_sync_fs,
	.freeze_fs	= ext3_freeze,
	.unfreeze_fs	= ext3_unfreeze,
	.statfs		= ext3_statfs,
	.remount_fs	= ext3_remount,
	.show_options	= ext3_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext3_quota_read,
	.quota_write	= ext3_quota_write,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};

/* NFS export operations (file-handle decoding). */
static const struct export_operations ext3_export_ops = {
	.fh_to_dentry = ext3_fh_to_dentry,
	.fh_to_parent = ext3_fh_to_parent,
	.get_parent = ext3_get_parent,
};

/* Token identifiers for the mount-option parser below. */
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
	Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc,
	Opt_orlov, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0,
Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, Opt_resize, Opt_usrquota, Opt_grpquota }; static const match_table_t tokens = { {Opt_bsd_df, "bsddf"}, {Opt_minix_df, "minixdf"}, {Opt_grpid, "grpid"}, {Opt_grpid, "bsdgroups"}, {Opt_nogrpid, "nogrpid"}, {Opt_nogrpid, "sysvgroups"}, {Opt_resgid, "resgid=%u"}, {Opt_resuid, "resuid=%u"}, {Opt_sb, "sb=%u"}, {Opt_err_cont, "errors=continue"}, {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_nouid32, "nouid32"}, {Opt_nocheck, "nocheck"}, {Opt_nocheck, "check=none"}, {Opt_debug, "debug"}, {Opt_oldalloc, "oldalloc"}, {Opt_orlov, "orlov"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_reservation, "reservation"}, {Opt_noreservation, "noreservation"}, {Opt_noload, "noload"}, {Opt_noload, "norecovery"}, {Opt_nobh, "nobh"}, {Opt_bh, "bh"}, {Opt_commit, "commit=%u"}, {Opt_journal_update, "journal=update"}, {Opt_journal_inum, "journal=%u"}, {Opt_journal_dev, "journal_dev=%u"}, {Opt_abort, "abort"}, {Opt_data_journal, "data=journal"}, {Opt_data_ordered, "data=ordered"}, {Opt_data_writeback, "data=writeback"}, {Opt_data_err_abort, "data_err=abort"}, {Opt_data_err_ignore, "data_err=ignore"}, {Opt_offusrjquota, "usrjquota="}, {Opt_usrjquota, "usrjquota=%s"}, {Opt_offgrpjquota, "grpjquota="}, {Opt_grpjquota, "grpjquota=%s"}, {Opt_jqfmt_vfsold, "jqfmt=vfsold"}, {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"}, {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"}, {Opt_grpquota, "grpquota"}, {Opt_noquota, "noquota"}, {Opt_quota, "quota"}, {Opt_usrquota, "usrquota"}, {Opt_barrier, "barrier=%u"}, {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_resize, "resize"}, {Opt_err, NULL}, }; static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb) { ext3_fsblk_t sb_block; char *options = (char *) *data; if (!options || strncmp(options, "sb=", 3) != 0) return 1; /* Default location */ options += 3; /*todo: use simple_strtoll with 
>32bit ext3 */ sb_block = simple_strtoul(options, &options, 0); if (*options && *options != ',') { ext3_msg(sb, "error: invalid sb specification: %s", (char *) *data); return 1; } if (*options == ',') options++; *data = (void *) options; return sb_block; } #ifdef CONFIG_QUOTA static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) { struct ext3_sb_info *sbi = EXT3_SB(sb); char *qname; if (sb_any_quota_loaded(sb) && !sbi->s_qf_names[qtype]) { ext3_msg(sb, KERN_ERR, "Cannot change journaled " "quota options when quota turned on"); return 0; } qname = match_strdup(args); if (!qname) { ext3_msg(sb, KERN_ERR, "Not enough memory for storing quotafile name"); return 0; } if (sbi->s_qf_names[qtype]) { int same = !strcmp(sbi->s_qf_names[qtype], qname); kfree(qname); if (!same) { ext3_msg(sb, KERN_ERR, "%s quota file already specified", QTYPE2NAME(qtype)); } return same; } if (strchr(qname, '/')) { ext3_msg(sb, KERN_ERR, "quotafile must be on filesystem root"); kfree(qname); return 0; } sbi->s_qf_names[qtype] = qname; set_opt(sbi->s_mount_opt, QUOTA); return 1; } static int clear_qf_name(struct super_block *sb, int qtype) { struct ext3_sb_info *sbi = EXT3_SB(sb); if (sb_any_quota_loaded(sb) && sbi->s_qf_names[qtype]) { ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options" " when quota turned on"); return 0; } if (sbi->s_qf_names[qtype]) { kfree(sbi->s_qf_names[qtype]); sbi->s_qf_names[qtype] = NULL; } return 1; } #endif static int parse_options (char *options, struct super_block *sb, unsigned int *inum, unsigned long *journal_devnum, ext3_fsblk_t *n_blocks_count, int is_remount) { struct ext3_sb_info *sbi = EXT3_SB(sb); char * p; substring_t args[MAX_OPT_ARGS]; int data_opt = 0; int option; kuid_t uid; kgid_t gid; #ifdef CONFIG_QUOTA int qfmt; #endif if (!options) return 1; while ((p = strsep (&options, ",")) != NULL) { int token; if (!*p) continue; /* * Initialize args struct so we know whether arg was * found; some options take optional 
arguments. */
		args[0].to = args[0].from = NULL;
		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_bsd_df:
			clear_opt (sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_minix_df:
			set_opt (sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_grpid:
			set_opt (sbi->s_mount_opt, GRPID);
			break;
		case Opt_nogrpid:
			clear_opt (sbi->s_mount_opt, GRPID);
			break;
		case Opt_resuid:
			/* reserved-blocks uid: validated via make_kuid() */
			if (match_int(&args[0], &option))
				return 0;
			uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(uid)) {
				ext3_msg(sb, KERN_ERR,
					"Invalid uid value %d", option);
				return 0;
			}
			sbi->s_resuid = uid;
			break;
		case Opt_resgid:
			/* reserved-blocks gid: validated via make_kgid() */
			if (match_int(&args[0], &option))
				return 0;
			gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(gid)) {
				ext3_msg(sb, KERN_ERR,
					"Invalid gid value %d", option);
				return 0;
			}
			sbi->s_resgid = gid;
			break;
		case Opt_sb:
			/* handled by get_sb_block() instead of here */
			/* *sb_block = match_int(&args[0]); */
			break;
		case Opt_err_panic:
			/* errors= settings are mutually exclusive */
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			set_opt (sbi->s_mount_opt, ERRORS_PANIC);
			break;
		case Opt_err_ro:
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_RO);
			break;
		case Opt_err_cont:
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_CONT);
			break;
		case Opt_nouid32:
			set_opt (sbi->s_mount_opt, NO_UID32);
			break;
		case Opt_nocheck:
			clear_opt (sbi->s_mount_opt, CHECK);
			break;
		case Opt_debug:
			set_opt (sbi->s_mount_opt, DEBUG);
			break;
		case Opt_oldalloc:
			ext3_msg(sb, KERN_WARNING,
				"Ignoring deprecated oldalloc option");
			break;
		case Opt_orlov:
			ext3_msg(sb, KERN_WARNING,
				"Ignoring deprecated orlov option");
			break;
#ifdef CONFIG_EXT3_FS_XATTR
		case Opt_user_xattr:
			set_opt (sbi->s_mount_opt, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt (sbi->s_mount_opt, XATTR_USER);
			break;
#else
		case Opt_user_xattr:
		case Opt_nouser_xattr:
			ext3_msg(sb, KERN_INFO,
				"(no)user_xattr options not supported");
			break;
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi->s_mount_opt, POSIX_ACL);
			break;
#else
		case Opt_acl:
		case Opt_noacl:
			ext3_msg(sb, KERN_INFO,
				"(no)acl options not supported");
			break;
#endif
		case Opt_reservation:
			set_opt(sbi->s_mount_opt, RESERVATION);
			break;
		case Opt_noreservation:
			clear_opt(sbi->s_mount_opt, RESERVATION);
			break;
		case Opt_journal_update:
			/* @@@ FIXME */
			/* Eventually we will want to be able to create
			   a journal file here.  For now, only allow the
			   user to specify an existing inode to be the
			   journal file. */
			if (is_remount) {
				ext3_msg(sb, KERN_ERR, "error: cannot specify "
					"journal on remount");
				return 0;
			}
			set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
			break;
		case Opt_journal_inum:
			if (is_remount) {
				ext3_msg(sb, KERN_ERR, "error: cannot specify "
					"journal on remount");
				return 0;
			}
			if (match_int(&args[0], &option))
				return 0;
			*inum = option;
			break;
		case Opt_journal_dev:
			if (is_remount) {
				ext3_msg(sb, KERN_ERR, "error: cannot specify "
					"journal on remount");
				return 0;
			}
			if (match_int(&args[0], &option))
				return 0;
			*journal_devnum = option;
			break;
		case Opt_noload:
			set_opt (sbi->s_mount_opt, NOLOAD);
			break;
		case Opt_commit:
			/* commit=0 means "use the jbd default age" */
			if (match_int(&args[0], &option))
				return 0;
			if (option < 0)
				return 0;
			if (option == 0)
				option = JBD_DEFAULT_MAX_COMMIT_AGE;
			sbi->s_commit_interval = HZ * option;
			break;
		case Opt_data_journal:
			data_opt = EXT3_MOUNT_JOURNAL_DATA;
			goto datacheck;
		case Opt_data_ordered:
			data_opt = EXT3_MOUNT_ORDERED_DATA;
			goto datacheck;
		case Opt_data_writeback:
			data_opt = EXT3_MOUNT_WRITEBACK_DATA;
		datacheck:
			/* the data journaling mode cannot change on remount */
			if (is_remount) {
				if (test_opt(sb, DATA_FLAGS) == data_opt)
					break;
				ext3_msg(sb, KERN_ERR,
					"error: cannot change "
					"data mode on remount. The filesystem "
					"is mounted in data=%s mode and you "
					"try to remount it in data=%s mode.",
					data_mode_string(test_opt(sb,
							DATA_FLAGS)),
					data_mode_string(data_opt));
				return 0;
			} else {
				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
				sbi->s_mount_opt |= data_opt;
			}
			break;
		case Opt_data_err_abort:
			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
			break;
		case Opt_data_err_ignore:
			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
			break;
#ifdef CONFIG_QUOTA
		case Opt_usrjquota:
			if (!set_qf_name(sb, USRQUOTA, &args[0]))
				return 0;
			break;
		case Opt_grpjquota:
			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
				return 0;
			break;
		case Opt_offusrjquota:
			if (!clear_qf_name(sb, USRQUOTA))
				return 0;
			break;
		case Opt_offgrpjquota:
			if (!clear_qf_name(sb, GRPQUOTA))
				return 0;
			break;
		case Opt_jqfmt_vfsold:
			qfmt = QFMT_VFS_OLD;
			goto set_qf_format;
		case Opt_jqfmt_vfsv0:
			qfmt = QFMT_VFS_V0;
			goto set_qf_format;
		case Opt_jqfmt_vfsv1:
			qfmt = QFMT_VFS_V1;
set_qf_format:
			/* the format may not change while quota is on */
			if (sb_any_quota_loaded(sb) &&
			    sbi->s_jquota_fmt != qfmt) {
				ext3_msg(sb, KERN_ERR, "error: cannot change "
					"journaled quota options when "
					"quota turned on.");
				return 0;
			}
			sbi->s_jquota_fmt = qfmt;
			break;
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi->s_mount_opt, QUOTA);
			set_opt(sbi->s_mount_opt, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi->s_mount_opt, QUOTA);
			set_opt(sbi->s_mount_opt, GRPQUOTA);
			break;
		case Opt_noquota:
			if (sb_any_quota_loaded(sb)) {
				ext3_msg(sb, KERN_ERR, "error: cannot change "
					"quota options when quota turned on.");
				return 0;
			}
			clear_opt(sbi->s_mount_opt, QUOTA);
			clear_opt(sbi->s_mount_opt, USRQUOTA);
			clear_opt(sbi->s_mount_opt, GRPQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
			ext3_msg(sb, KERN_ERR,
				"error: quota options not supported.");
			break;
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
			ext3_msg(sb, KERN_ERR,
				"error: journaled quota options not "
				"supported.");
			break;
		case Opt_noquota:
			break;
#endif
		case Opt_abort:
			set_opt(sbi->s_mount_opt, ABORT);
			break;
		case Opt_nobarrier:
			clear_opt(sbi->s_mount_opt, BARRIER);
			break;
		case Opt_barrier:
			/* "barrier" may appear bare or as barrier=%u */
			if (args[0].from) {
				if (match_int(&args[0], &option))
					return 0;
			} else
				option = 1;	/* No argument, default to 1 */
			if (option)
				set_opt(sbi->s_mount_opt, BARRIER);
			else
				clear_opt(sbi->s_mount_opt, BARRIER);
			break;
		case Opt_ignore:
			break;
		case Opt_resize:
			if (!is_remount) {
				ext3_msg(sb, KERN_ERR,
					"error: resize option only available "
					"for remount");
				return 0;
			}
			if (match_int(&args[0], &option) != 0)
				return 0;
			*n_blocks_count = option;
			break;
		case Opt_nobh:
			ext3_msg(sb, KERN_WARNING,
				"warning: ignoring deprecated nobh option");
			break;
		case Opt_bh:
			ext3_msg(sb, KERN_WARNING,
				"warning: ignoring deprecated bh option");
			break;
		default:
			ext3_msg(sb, KERN_ERR,
				"error: unrecognized mount option \"%s\" "
				"or missing value", p);
			return 0;
		}
	}
#ifdef CONFIG_QUOTA
	/* Cross-check: journaled (usrjquota=/grpjquota=) and non-journaled
	 * (usrquota/grpquota) styles must not be mixed, and a journaled
	 * quota needs an explicit jqfmt= format. */
	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
			clear_opt(sbi->s_mount_opt, USRQUOTA);
		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
			clear_opt(sbi->s_mount_opt, GRPQUOTA);

		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
			ext3_msg(sb, KERN_ERR, "error: old and new quota "
					"format mixing.");
			return 0;
		}

		if (!sbi->s_jquota_fmt) {
			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
					"not specified.");
			return 0;
		}
	} else {
		if (sbi->s_jquota_fmt) {
			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
					"specified with no journaling "
					"enabled.");
			return 0;
		}
	}
#endif
	return 1;
}

/*
 * Post-parse superblock setup: warn about fsck-worthy conditions, bump the
 * mount count, set the RECOVER incompat flag and commit the superblock.
 * Returns MS_RDONLY if the revision level forces a read-only mount.
 */
static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
			    int read_only)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) {
		ext3_msg(sb, KERN_ERR,
			"error: revision level too high, "
			"forcing read-only mode");
		res = MS_RDONLY;
	}
	if (read_only)
		return res;
	if (!(sbi->s_mount_state & EXT3_VALID_FS))
		ext3_msg(sb, KERN_WARNING,
"warning: mounting unchecked fs, "
			"running e2fsck is recommended");
	else if ((sbi->s_mount_state & EXT3_ERROR_FS))
		ext3_msg(sb, KERN_WARNING,
			"warning: mounting fs with errors, "
			"running e2fsck is recommended");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
		le16_to_cpu(es->s_mnt_count) >=
			le16_to_cpu(es->s_max_mnt_count))
		ext3_msg(sb, KERN_WARNING,
			"warning: maximal mount count reached, "
			"running e2fsck is recommended");
	else if (le32_to_cpu(es->s_checkinterval) &&
		(le32_to_cpu(es->s_lastcheck) +
			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		ext3_msg(sb, KERN_WARNING,
			"warning: checktime reached, "
			"running e2fsck is recommended");
#if 0
	/* @@@ We _will_ want to clear the valid bit if we find
	   inconsistencies, to force a fsck at reboot.  But for
	   a plain journaled filesystem we can keep it set as
	   valid forever! :) */
	es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
#endif
	if (!le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext3_update_dynamic_rev(sb);
	/* mark the fs as needing recovery until a clean unmount clears it */
	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);

	ext3_commit_super(sb, es, 1);
	if (test_opt(sb, DEBUG))
		ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, "
				"bpg=%lu, ipg=%lu, mo=%04lx]",
			sb->s_blocksize,
			sbi->s_groups_count,
			EXT3_BLOCKS_PER_GROUP(sb),
			EXT3_INODES_PER_GROUP(sb),
			sbi->s_mount_opt);

	if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
		char b[BDEVNAME_SIZE];

		ext3_msg(sb, KERN_INFO, "using external journal on %s",
			bdevname(EXT3_SB(sb)->s_journal->j_dev, b));
	} else {
		ext3_msg(sb, KERN_INFO, "using internal journal");
	}
	cleancache_init_fs(sb);
	return res;
}

/* Called at mount-time, super-block is locked */
/*
 * Sanity-check every group descriptor: the block bitmap, inode bitmap and
 * inode table of each group must lie within that group's block range.
 * Also recomputes the superblock free-block/free-inode counts.
 * Returns 1 if all descriptors are sane, 0 otherwise.
 */
static int ext3_check_descriptors(struct super_block *sb)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	int i;

	ext3_debug ("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
		ext3_fsblk_t first_block = ext3_group_first_block_no(sb, i);
		ext3_fsblk_t last_block;

		/* the last group may be shorter than a full group */
		if (i == sbi->s_groups_count - 1)
			last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
		else
			last_block = first_block +
				(EXT3_BLOCKS_PER_GROUP(sb) - 1);

		if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
		    le32_to_cpu(gdp->bg_block_bitmap) > last_block) {
			ext3_error (sb, "ext3_check_descriptors",
				    "Block bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long)
					le32_to_cpu(gdp->bg_block_bitmap));
			return 0;
		}
		if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
		    le32_to_cpu(gdp->bg_inode_bitmap) > last_block) {
			ext3_error (sb, "ext3_check_descriptors",
				    "Inode bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long)
					le32_to_cpu(gdp->bg_inode_bitmap));
			return 0;
		}
		if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
		    le32_to_cpu(gdp->bg_inode_table) +
			sbi->s_itb_per_group - 1 > last_block) {
			ext3_error (sb, "ext3_check_descriptors",
				    "Inode table for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long)
					le32_to_cpu(gdp->bg_inode_table));
			return 0;
		}
	}

	sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
	sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb));
	return 1;
}


/* ext3_orphan_cleanup() walks a singly-linked list of inodes (starting at
 * the superblock) which were deleted from all directories, but held open by
 * a process at the time of a crash.  We walk the list and try to delete these
 * inodes at recovery time (only with a read-write filesystem).
 *
 * In order to keep the orphan inode chain consistent during traversal (in
 * case of crash during recovery), we link each inode into the superblock
 * orphan list_head and handle it the same way as an inode deletion during
 * normal operation (which journals the operations for us).
 *
 * We only do an iget() and an iput() on each inode, which is very safe if we
 * accidentally point at an in-use or already deleted inode.
The worst that
 * can happen in this case is that we get a "bit already cleared" message from
 * ext3_free_inode().  The only reason we would point at a wrong inode is if
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext3_orphan_cleanup (struct super_block * sb,
				 struct ext3_super_block * es)
{
	unsigned int s_flags = sb->s_flags;
	int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
	int i;
#endif
	if (!es->s_last_orphan) {
		jbd_debug(4, "no orphan inodes to clean up\n");
		return;
	}

	if (bdev_read_only(sb->s_bdev)) {
		ext3_msg(sb, KERN_ERR, "error: write access "
			"unavailable, skipping orphan cleanup.");
		return;
	}

	/* Check if feature set allows readwrite operations */
	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
		ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
			 "unknown ROCOMPAT features");
		return;
	}

	if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
		/* don't clear list on RO mount w/ errors */
		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
			jbd_debug(1, "Errors on filesystem, "
				  "clearing orphan list.\n");
			es->s_last_orphan = 0;
		}
		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
		return;
	}

	if (s_flags & MS_RDONLY) {
		/* temporarily lift MS_RDONLY so deletes/truncates can run;
		 * restored from s_flags at the end of this function */
		ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
		sb->s_flags &= ~MS_RDONLY;
	}
#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sb->s_flags |= MS_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	for (i = 0; i < MAXQUOTAS; i++) {
		if (EXT3_SB(sb)->s_qf_names[i]) {
			int ret = ext3_quota_on_mount(sb, i);
			if (ret < 0)
				ext3_msg(sb, KERN_ERR,
					"error: cannot turn on journaled "
					"quota: %d", ret);
		}
	}
#endif

	while (es->s_last_orphan) {
		struct inode *inode;

		inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
		if (IS_ERR(inode)) {
			/* bad head of the on-disk orphan chain: give up and
			 * clear the list */
			es->s_last_orphan = 0;
			break;
		}

		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
		dquot_initialize(inode);
		if (inode->i_nlink) {
			/* still linked: the crash interrupted a truncate */
			printk(KERN_DEBUG
				"%s: truncating inode %lu to %Ld bytes\n",
				__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
				  inode->i_ino, inode->i_size);
			ext3_truncate(inode);
			nr_truncates++;
		} else {
			/* unlinked: finish the interrupted delete */
			printk(KERN_DEBUG
				"%s: deleting unreferenced inode %lu\n",
				__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
		}
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x)==1) ? "" : "s"

	if (nr_orphans)
		ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
		       PLURAL(nr_orphans));
	if (nr_truncates)
		ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
		       PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	for (i = 0; i < MAXQUOTAS; i++) {
		if (sb_dqopt(sb)->files[i])
			dquot_quota_off(sb, i);
	}
#endif
	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
}

/*
 * Maximal file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^32 sector limit.
*/
static loff_t ext3_max_size(int bits)
{
	loff_t res = EXT3_NDIR_BLOCKS;
	int meta_blocks;
	loff_t upper_limit;

	/* This is calculated to be the largest file size for a
	 * dense, file such that the total number of
	 * sectors in the file, including data and all indirect blocks,
	 * does not exceed 2^32 -1
	 * __u32 i_blocks representing the total number of
	 * 512 bytes blocks of the file
	 */
	upper_limit = (1LL << 32) - 1;

	/* total blocks in file system block size */
	upper_limit >>= (bits - 9);

	/* indirect blocks */
	meta_blocks = 1;
	/* double indirect blocks */
	meta_blocks += 1 + (1LL << (bits-2));
	/* tripple indirect blocks */
	/* NOTE(review): meta_blocks is an int; for ext3's supported block
	 * sizes the 1LL<<(2*(bits-2)) term appears to stay within int range,
	 * but this deserves confirmation against EXT3_MAX_BLOCK_SIZE. */
	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));

	upper_limit -= meta_blocks;
	upper_limit <<= bits;

	/* direct + single/double/triple indirect data block capacity */
	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	res <<= bits;
	if (res > upper_limit)
		res = upper_limit;

	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}

/*
 * Locate group descriptor block @nr: right after the superblock copy for
 * non-META_BG layouts (or groups below s_first_meta_bg), otherwise at the
 * start of the owning group, skipping a superblock backup if present.
 */
static ext3_fsblk_t descriptor_loc(struct super_block *sb,
				    ext3_fsblk_t logic_sb_block,
				    int nr)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);
	unsigned long bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
	    nr < first_meta_bg)
		return (logic_sb_block + nr + 1);
	bg = sbi->s_desc_per_block * nr;
	if (ext3_bg_has_super(sb, bg))
		has_super = 1;
	return (has_super + ext3_group_first_block_no(sb, bg));
}

/*
 * Mount entry point: read and validate the on-disk superblock, parse mount
 * options, load group descriptors and the journal, read the root inode and
 * run orphan recovery. Returns 0 on success or a negative errno.
 */
static int ext3_fill_super (struct super_block *sb, void *data, int silent)
{
	struct buffer_head * bh;
	struct ext3_super_block *es = NULL;
	struct ext3_sb_info *sbi;
	ext3_fsblk_t block;
	ext3_fsblk_t sb_block = get_sb_block(&data, sb);
	ext3_fsblk_t logic_sb_block;
	unsigned long offset = 0;
	unsigned int journal_inum = 0;
	unsigned long journal_devnum = 0;
	unsigned long def_mount_opts;
	struct inode *root;
	int blocksize;
	int hblock;
	int db_count;
	int i;
	int needs_recovery;
	int ret = -EINVAL;
	__le32 features;
	int err;

	sbi = kzalloc(sizeof(*sbi),
GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock) {
		kfree(sbi);
		return -ENOMEM;
	}
	sb->s_fs_info = sbi;
	sbi->s_sb_block = sb_block;

	blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
	if (!blocksize) {
		ext3_msg(sb, KERN_ERR, "error: unable to set blocksize");
		goto out_fail;
	}

	/*
	 * The ext3 superblock will not be buffer aligned for other than 1kB
	 * block sizes.  We need to calculate the offset from buffer start.
	 */
	if (blocksize != EXT3_MIN_BLOCK_SIZE) {
		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
	} else {
		logic_sb_block = sb_block;
	}

	if (!(bh = sb_bread(sb, logic_sb_block))) {
		ext3_msg(sb, KERN_ERR, "error: unable to read superblock");
		goto out_fail;
	}
	/*
	 * Note: s_es must be initialized as soon as possible because
	 *       some ext3 macro-instructions depend on its value
	 */
	es = (struct ext3_super_block *) (bh->b_data + offset);
	sbi->s_es = es;
	sb->s_magic = le16_to_cpu(es->s_magic);
	if (sb->s_magic != EXT3_SUPER_MAGIC)
		goto cantfind_ext3;

	/* Set defaults before we parse the mount options */
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
	if (def_mount_opts & EXT3_DEFM_DEBUG)
		set_opt(sbi->s_mount_opt, DEBUG);
	if (def_mount_opts & EXT3_DEFM_BSDGROUPS)
		set_opt(sbi->s_mount_opt, GRPID);
	if (def_mount_opts & EXT3_DEFM_UID16)
		set_opt(sbi->s_mount_opt, NO_UID32);
#ifdef CONFIG_EXT3_FS_XATTR
	if (def_mount_opts & EXT3_DEFM_XATTR_USER)
		set_opt(sbi->s_mount_opt, XATTR_USER);
#endif
#ifdef CONFIG_EXT3_FS_POSIX_ACL
	if (def_mount_opts & EXT3_DEFM_ACL)
		set_opt(sbi->s_mount_opt, POSIX_ACL);
#endif
	if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
		set_opt(sbi->s_mount_opt, ORDERED_DATA);
	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);

	/* default error behaviour comes from the superblock */
	if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE)
		set_opt(sbi->s_mount_opt, ERRORS_CONT);
	else
		set_opt(sbi->s_mount_opt, ERRORS_RO);

	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));

	/* enable barriers by default */
	set_opt(sbi->s_mount_opt, BARRIER);
	set_opt(sbi->s_mount_opt, RESERVATION);

	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
			    NULL, 0))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);

	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
	    (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
	     EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
	     EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
		ext3_msg(sb, KERN_WARNING,
			"warning: feature flags set on rev 0 fs, "
			"running e2fsck is recommended");
	/*
	 * Check feature flags regardless of the revision level, since we
	 * previously didn't change the revision level when setting the flags,
	 * so there is a chance incompat flags are set on a rev 0 filesystem.
	 */
	features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP);
	if (features) {
		ext3_msg(sb, KERN_ERR,
			"error: couldn't mount because of unsupported "
			"optional features (%x)", le32_to_cpu(features));
		goto failed_mount;
	}
	features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP);
	if (!(sb->s_flags & MS_RDONLY) && features) {
		ext3_msg(sb, KERN_ERR,
			"error: couldn't mount RDWR because of unsupported "
			"optional features (%x)", le32_to_cpu(features));
		goto failed_mount;
	}
	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);

	if (blocksize < EXT3_MIN_BLOCK_SIZE ||
	    blocksize > EXT3_MAX_BLOCK_SIZE) {
		ext3_msg(sb, KERN_ERR,
			"error: couldn't mount because of unsupported "
			"filesystem blocksize %d", blocksize);
		goto failed_mount;
	}

	hblock = bdev_logical_block_size(sb->s_bdev);
	if (sb->s_blocksize != blocksize) {
		/*
		 * Make sure the blocksize for the filesystem is larger
		 * than the hardware sectorsize for the machine.
		 */
		if (blocksize < hblock) {
			ext3_msg(sb, KERN_ERR,
				"error: fsblocksize %d too small for "
				"hardware sectorsize %d", blocksize, hblock);
			goto failed_mount;
		}

		/* re-read the superblock at the real filesystem blocksize */
		brelse (bh);
		if (!sb_set_blocksize(sb, blocksize)) {
			ext3_msg(sb, KERN_ERR,
				"error: bad blocksize %d", blocksize);
			goto out_fail;
		}
		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
		bh = sb_bread(sb, logic_sb_block);
		if (!bh) {
			ext3_msg(sb, KERN_ERR,
			       "error: can't read superblock on 2nd try");
			goto failed_mount;
		}
		es = (struct ext3_super_block *)(bh->b_data + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
			ext3_msg(sb, KERN_ERR,
				"error: magic mismatch");
			goto failed_mount;
		}
	}

	sb->s_maxbytes = ext3_max_size(sb->s_blocksize_bits);

	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV) {
		sbi->s_inode_size = EXT3_GOOD_OLD_INODE_SIZE;
		sbi->s_first_ino = EXT3_GOOD_OLD_FIRST_INO;
	} else {
		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
		if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			ext3_msg(sb, KERN_ERR,
				"error: unsupported inode size: %d",
				sbi->s_inode_size);
			goto failed_mount;
		}
	}
	sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
				   le32_to_cpu(es->s_log_frag_size);
	if (blocksize != sbi->s_frag_size) {
		ext3_msg(sb, KERN_ERR,
		       "error: fragsize %lu != blocksize %u (unsupported)",
		       sbi->s_frag_size, blocksize);
		goto failed_mount;
	}
	sbi->s_frags_per_block = 1;
	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
	/* guard the divisions below against zero values from a bad sb */
	if (EXT3_INODE_SIZE(sb) == 0 || EXT3_INODES_PER_GROUP(sb) == 0)
		goto cantfind_ext3;
	sbi->s_inodes_per_block = blocksize / EXT3_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext3;
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc);
	sbi->s_sbh = bh;
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT3_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT3_DESC_PER_BLOCK(sb));
	for (i=0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;
	i = le32_to_cpu(es->s_flags);
	if (i & EXT2_FLAGS_UNSIGNED_HASH)
		sbi->s_hash_unsigned = 3;
	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
		es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
		sbi->s_hash_unsigned = 3;
#else
		es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
	}

	/* per-group counts cannot exceed the bits in one bitmap block */
	if (sbi->s_blocks_per_group > blocksize * 8) {
		ext3_msg(sb, KERN_ERR,
			"#blocks per group too big: %lu",
			sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_frags_per_group > blocksize * 8) {
		ext3_msg(sb, KERN_ERR,
			"error: #fragments per group too big: %lu",
			sbi->s_frags_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > blocksize * 8)
{
		ext3_msg(sb, KERN_ERR,
			"error: #inodes per group too big: %lu",
			sbi->s_inodes_per_group);
		goto failed_mount;
	}

	err = generic_check_addressable(sb->s_blocksize_bits,
					le32_to_cpu(es->s_blocks_count));
	if (err) {
		ext3_msg(sb, KERN_ERR,
			"error: filesystem is too large to mount safely");
		if (sizeof(sector_t) < 8)
			ext3_msg(sb, KERN_ERR,
				"error: CONFIG_LBDAF not enabled");
		ret = err;
		goto failed_mount;
	}

	if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
		goto cantfind_ext3;
	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
			       le32_to_cpu(es->s_first_data_block) - 1)
				       / EXT3_BLOCKS_PER_GROUP(sb)) + 1;
	db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb));
	sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
				    GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		ext3_msg(sb, KERN_ERR,
			"error: not enough memory");
		ret = -ENOMEM;
		goto failed_mount;
	}

	bgl_lock_init(sbi->s_blockgroup_lock);

	/* read every group-descriptor block */
	for (i = 0; i < db_count; i++) {
		block = descriptor_loc(sb, logic_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			ext3_msg(sb, KERN_ERR,
				"error: can't read group descriptor %d", i);
			/* only release the buffers read so far on unwind */
			db_count = i;
			goto failed_mount2;
		}
	}
	if (!ext3_check_descriptors (sb)) {
		ext3_msg(sb, KERN_ERR,
			"error: group descriptors corrupted");
		goto failed_mount2;
	}
	sbi->s_gdb_count = db_count;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
	spin_lock_init(&sbi->s_next_gen_lock);

	/* per fileystem reservation list head & lock */
	spin_lock_init(&sbi->s_rsv_window_lock);
	sbi->s_rsv_window_root = RB_ROOT;
	/* Add a single, static dummy reservation to the start of the
	 * reservation window list --- it gives us a placeholder for
	 * append-at-start-of-list which makes the allocation logic
	 * _much_ simpler. */
	sbi->s_rsv_window_head.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
	sbi->s_rsv_window_head.rsv_goal_size = 0;
	ext3_rsv_window_add(sb, &sbi->s_rsv_window_head);

	/*
	 * set up enough so that it can read an inode
	 */
	sb->s_op = &ext3_sops;
	sb->s_export_op = &ext3_export_ops;
	sb->s_xattr = ext3_xattr_handlers;
#ifdef CONFIG_QUOTA
	sb->s_qcop = &ext3_qctl_operations;
	sb->dq_op = &ext3_quota_operations;
#endif
	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
	mutex_init(&sbi->s_orphan_lock);
	mutex_init(&sbi->s_resize_lock);

	sb->s_root = NULL;

	needs_recovery = (es->s_last_orphan != 0 ||
			  EXT3_HAS_INCOMPAT_FEATURE(sb,
				    EXT3_FEATURE_INCOMPAT_RECOVER));

	/*
	 * The first inode we look at is the journal inode.  Don't try
	 * root first: it may be modified in the journal!
	 */
	if (!test_opt(sb, NOLOAD) &&
	    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
		if (ext3_load_journal(sb, es, journal_devnum))
			goto failed_mount2;
	} else if (journal_inum) {
		if (ext3_create_journal(sb, es, journal_inum))
			goto failed_mount2;
	} else {
		if (!silent)
			ext3_msg(sb, KERN_ERR,
				"error: no journal found. "
				"mounting ext3 over ext2?");
		goto failed_mount2;
	}
	err = percpu_counter_init(&sbi->s_freeblocks_counter,
			ext3_count_free_blocks(sb));
	if (!err) {
		err = percpu_counter_init(&sbi->s_freeinodes_counter,
				ext3_count_free_inodes(sb));
	}
	if (!err) {
		err = percpu_counter_init(&sbi->s_dirs_counter,
				ext3_count_dirs(sb));
	}
	if (err) {
		ext3_msg(sb, KERN_ERR, "error: insufficient memory");
		ret = err;
		goto failed_mount3;
	}

	/* We have now updated the journal if required, so we can
	 * validate the data journaling mode. */
	switch (test_opt(sb, DATA_FLAGS)) {
	case 0:
		/* No mode set, assume a default based on the journal
		   capabilities: ORDERED_DATA if the journal can
		   cope, else JOURNAL_DATA */
		if (journal_check_available_features
		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE))
			set_opt(sbi->s_mount_opt, DEFAULT_DATA_MODE);
		else
			set_opt(sbi->s_mount_opt, JOURNAL_DATA);
		break;

	case EXT3_MOUNT_ORDERED_DATA:
	case EXT3_MOUNT_WRITEBACK_DATA:
		/* ordered/writeback both require journal revoke support */
		if (!journal_check_available_features
		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
			ext3_msg(sb, KERN_ERR,
				"error: journal does not support "
				"requested data journaling mode");
			goto failed_mount3;
		}
	default:
		break;
	}

	/*
	 * The journal_load will have done any necessary log recovery,
	 * so we can safely mount the rest of the filesystem now.
	 */

	root = ext3_iget(sb, EXT3_ROOT_INO);
	if (IS_ERR(root)) {
		ext3_msg(sb, KERN_ERR, "error: get root inode failed");
		ret = PTR_ERR(root);
		goto failed_mount3;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
		goto failed_mount3;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
		ret = -ENOMEM;
		goto failed_mount3;
	}

	if (ext3_setup_super(sb, es, sb->s_flags & MS_RDONLY))
		sb->s_flags |= MS_RDONLY;

	/* orphan recovery runs with EXT3_ORPHAN_FS set so other code can
	 * tell a recovery-time delete from a normal one */
	EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
	ext3_orphan_cleanup(sb, es);
	EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
	if (needs_recovery) {
		ext3_mark_recovery_complete(sb, es);
		ext3_msg(sb, KERN_INFO, "recovery complete");
	}
	ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode",
		test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
		test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
		"writeback");
	sb->s_flags |= MS_SNAP_STABLE;

	return 0;

	/* error unwind: each label releases what was acquired after the
	 * previous one */
cantfind_ext3:
	if (!silent)
		ext3_msg(sb, KERN_INFO,
			"error: can't find ext3 filesystem on dev %s.",
		       sb->s_id);
	goto failed_mount;

failed_mount3:
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	journal_destroy(sbi->s_journal);
failed_mount2:
	for (i = 0; i < db_count; i++)
		brelse(sbi->s_group_desc[i]);
	kfree(sbi->s_group_desc);
failed_mount:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif
	ext3_blkdev_remove(sbi);
	brelse(bh);
out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
	return ret;
}

/*
 * Setup any per-fs journal parameters now.  We'll do this both on
 * initial mount, once the journal has been initialised but before we've
 * done any recovery; and again on any subsequent remount.
 */
static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
{
	struct ext3_sb_info *sbi = EXT3_SB(sb);

	if (sbi->s_commit_interval)
		journal->j_commit_interval = sbi->s_commit_interval;
	/* We could also set up an ext3-specific default for the commit
	 * interval here, but for now we'll just fall back to the jbd
	 * default. */

	spin_lock(&journal->j_state_lock);
	if (test_opt(sb, BARRIER))
		journal->j_flags |= JFS_BARRIER;
	else
		journal->j_flags &= ~JFS_BARRIER;
	if (test_opt(sb, DATA_ERR_ABORT))
		journal->j_flags |= JFS_ABORT_ON_SYNCDATA_ERR;
	else
		journal->j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR;
	spin_unlock(&journal->j_state_lock);
}

static journal_t *ext3_get_journal(struct super_block *sb,
				   unsigned int journal_inum)
{
	struct inode *journal_inode;
	journal_t *journal;

	/* First, test for the existence of a valid inode on disk.  Bad
	 * things happen if we iget() an unused inode, as the subsequent
	 * iput() will try to delete it.
*/ journal_inode = ext3_iget(sb, journal_inum); if (IS_ERR(journal_inode)) { ext3_msg(sb, KERN_ERR, "error: no journal found"); return NULL; } if (!journal_inode->i_nlink) { make_bad_inode(journal_inode); iput(journal_inode); ext3_msg(sb, KERN_ERR, "error: journal inode is deleted"); return NULL; } jbd_debug(2, "Journal inode found at %p: %Ld bytes\n", journal_inode, journal_inode->i_size); if (!S_ISREG(journal_inode->i_mode)) { ext3_msg(sb, KERN_ERR, "error: invalid journal inode"); iput(journal_inode); return NULL; } journal = journal_init_inode(journal_inode); if (!journal) { ext3_msg(sb, KERN_ERR, "error: could not load journal inode"); iput(journal_inode); return NULL; } journal->j_private = sb; ext3_init_journal_params(sb, journal); return journal; } static journal_t *ext3_get_dev_journal(struct super_block *sb, dev_t j_dev) { struct buffer_head * bh; journal_t *journal; ext3_fsblk_t start; ext3_fsblk_t len; int hblock, blocksize; ext3_fsblk_t sb_block; unsigned long offset; struct ext3_super_block * es; struct block_device *bdev; bdev = ext3_blkdev_get(j_dev, sb); if (bdev == NULL) return NULL; blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { ext3_msg(sb, KERN_ERR, "error: blocksize too small for journal device"); goto out_bdev; } sb_block = EXT3_MIN_BLOCK_SIZE / blocksize; offset = EXT3_MIN_BLOCK_SIZE % blocksize; set_blocksize(bdev, blocksize); if (!(bh = __bread(bdev, sb_block, blocksize))) { ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of " "external journal"); goto out_bdev; } es = (struct ext3_super_block *) (bh->b_data + offset); if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) || !(le32_to_cpu(es->s_feature_incompat) & EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) { ext3_msg(sb, KERN_ERR, "error: external journal has " "bad superblock"); brelse(bh); goto out_bdev; } if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { ext3_msg(sb, KERN_ERR, "error: journal UUID does not match"); brelse(bh); 
goto out_bdev; } len = le32_to_cpu(es->s_blocks_count); start = sb_block + 1; brelse(bh); /* we're done with the superblock */ journal = journal_init_dev(bdev, sb->s_bdev, start, len, blocksize); if (!journal) { ext3_msg(sb, KERN_ERR, "error: failed to create device journal"); goto out_bdev; } journal->j_private = sb; if (!bh_uptodate_or_lock(journal->j_sb_buffer)) { if (bh_submit_read(journal->j_sb_buffer)) { ext3_msg(sb, KERN_ERR, "I/O error on journal device"); goto out_journal; } } if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) { ext3_msg(sb, KERN_ERR, "error: external journal has more than one " "user (unsupported) - %d", be32_to_cpu(journal->j_superblock->s_nr_users)); goto out_journal; } EXT3_SB(sb)->journal_bdev = bdev; ext3_init_journal_params(sb, journal); return journal; out_journal: journal_destroy(journal); out_bdev: ext3_blkdev_put(bdev); return NULL; } static int ext3_load_journal(struct super_block *sb, struct ext3_super_block *es, unsigned long journal_devnum) { journal_t *journal; unsigned int journal_inum = le32_to_cpu(es->s_journal_inum); dev_t journal_dev; int err = 0; int really_read_only; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { ext3_msg(sb, KERN_INFO, "external journal device major/minor " "numbers have changed"); journal_dev = new_decode_dev(journal_devnum); } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); really_read_only = bdev_read_only(sb->s_bdev); /* * Are we loading a blank journal or performing recovery after a * crash? For recovery, we need to check in advance whether we * can get read-write access to the device. 
*/ if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) { if (sb->s_flags & MS_RDONLY) { ext3_msg(sb, KERN_INFO, "recovery required on readonly filesystem"); if (really_read_only) { ext3_msg(sb, KERN_ERR, "error: write access " "unavailable, cannot proceed"); return -EROFS; } ext3_msg(sb, KERN_INFO, "write access will be enabled during recovery"); } } if (journal_inum && journal_dev) { ext3_msg(sb, KERN_ERR, "error: filesystem has both journal " "and inode journals"); return -EINVAL; } if (journal_inum) { if (!(journal = ext3_get_journal(sb, journal_inum))) return -EINVAL; } else { if (!(journal = ext3_get_dev_journal(sb, journal_dev))) return -EINVAL; } if (!(journal->j_flags & JFS_BARRIER)) printk(KERN_INFO "EXT3-fs: barriers not enabled\n"); if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) { err = journal_update_format(journal); if (err) { ext3_msg(sb, KERN_ERR, "error updating journal"); journal_destroy(journal); return err; } } if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) err = journal_wipe(journal, !really_read_only); if (!err) err = journal_load(journal); if (err) { ext3_msg(sb, KERN_ERR, "error loading journal"); journal_destroy(journal); return err; } EXT3_SB(sb)->s_journal = journal; ext3_clear_journal_err(sb, es); if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); /* Make sure we flush the recovery flag to disk. 
 */
		ext3_commit_super(sb, es, 1);
	}

	return 0;
}

/*
 * Create a brand-new journal in inode @journal_inum and advertise it in
 * the superblock.  Refuses a read-only mount (journal creation must write
 * to disk).  Returns 0 on success or a negative errno.
 */
static int ext3_create_journal(struct super_block *sb,
			       struct ext3_super_block *es,
			       unsigned int journal_inum)
{
	journal_t *journal;
	int err;

	/* Creating a journal rewrites on-disk state; impossible read-only. */
	if (sb->s_flags & MS_RDONLY) {
		ext3_msg(sb, KERN_ERR,
			"error: readonly filesystem when trying to "
			"create journal");
		return -EROFS;
	}

	journal = ext3_get_journal(sb, journal_inum);
	if (!journal)
		return -EINVAL;

	ext3_msg(sb, KERN_INFO, "creating new journal on inode %u",
		journal_inum);

	err = journal_create(journal);
	if (err) {
		ext3_msg(sb, KERN_ERR, "error creating journal");
		journal_destroy(journal);
		return -EIO;
	}

	EXT3_SB(sb)->s_journal = journal;

	/* Advertise the new journal: bump revision and set feature bits. */
	ext3_update_dynamic_rev(sb);
	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
	EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL);

	es->s_journal_inum = cpu_to_le32(journal_inum);

	/* Make sure we flush the recovery flag to disk. */
	ext3_commit_super(sb, es, 1);

	return 0;
}

/*
 * Write the in-core superblock back through its buffer head; if @sync is
 * set, wait for the write and return any I/O error, else return 0.
 */
static int ext3_commit_super(struct super_block *sb,
			       struct ext3_super_block *es,
			       int sync)
{
	struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
	int error = 0;

	if (!sbh)
		return error;

	if (buffer_write_io_error(sbh)) {
		/*
		 * Oh, dear. A previous attempt to write the
		 * superblock failed.  This could happen because the
		 * USB device was yanked out.  Or it could happen to
		 * be a transient write error and maybe the block will
		 * be remapped.  Nothing we can do but to retry the
		 * write and hope for the best.
		 */
		ext3_msg(sb, KERN_ERR, "previous I/O error to "
			"superblock detected");
		clear_buffer_write_io_error(sbh);
		set_buffer_uptodate(sbh);
	}
	/*
	 * If the file system is mounted read-only, don't update the
	 * superblock write time.
This avoids updating the superblock * write time when we are mounting the root file system * read/only but we need to replay the journal; at that point, * for people who are east of GMT and who make their clock * tick in localtime for Windows bug-for-bug compatibility, * the clock is set in the future, and this will cause e2fsck * to complain and force a full file system check. */ if (!(sb->s_flags & MS_RDONLY)) es->s_wtime = cpu_to_le32(get_seconds()); es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb)); es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb)); BUFFER_TRACE(sbh, "marking dirty"); mark_buffer_dirty(sbh); if (sync) { error = sync_dirty_buffer(sbh); if (buffer_write_io_error(sbh)) { ext3_msg(sb, KERN_ERR, "I/O error while writing " "superblock"); clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); } } return error; } /* * Have we just finished recovery? If so, and if we are mounting (or * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ static void ext3_mark_recovery_complete(struct super_block * sb, struct ext3_super_block * es) { journal_t *journal = EXT3_SB(sb)->s_journal; journal_lock_updates(journal); if (journal_flush(journal) < 0) goto out; if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && sb->s_flags & MS_RDONLY) { EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); ext3_commit_super(sb, es, 1); } out: journal_unlock_updates(journal); } /* * If we are mounting (or read-write remounting) a filesystem whose journal * has recorded an error from a previous lifetime, move that error to the * main filesystem now. 
 */
static void ext3_clear_journal_err(struct super_block *sb,
				   struct ext3_super_block *es)
{
	journal_t *journal;
	int j_errno;
	const char *errstr;

	journal = EXT3_SB(sb)->s_journal;

	/*
	 * Now check for any error status which may have been recorded in the
	 * journal by a prior ext3_error() or ext3_abort()
	 */
	j_errno = journal_errno(journal);
	if (j_errno) {
		char nbuf[16];

		errstr = ext3_decode_error(sb, j_errno, nbuf);
		ext3_warning(sb, __func__, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
		ext3_warning(sb, __func__, "Marking fs in need of "
			     "filesystem check.");

		/* Move the error onto the main fs, in core and on disk. */
		EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
		ext3_commit_super (sb, es, 1);

		journal_clear_err(journal);
	}
}

/*
 * Force the running and committing transactions to commit,
 * and wait on the commit.
 */
int ext3_force_commit(struct super_block *sb)
{
	journal_t *journal;
	int ret;

	/* Nothing to commit on a read-only mount. */
	if (sb->s_flags & MS_RDONLY)
		return 0;

	journal = EXT3_SB(sb)->s_journal;
	ret = ext3_journal_force_commit(journal);
	return ret;
}

/*
 * Sync the filesystem: write back non-journalled quota, then start a
 * journal commit (and wait for it when @wait is set).
 */
static int ext3_sync_fs(struct super_block *sb, int wait)
{
	tid_t target;

	trace_ext3_sync_fs(sb, wait);

	/*
	 * Writeback quota in non-journalled quota case - journalled quota has
	 * no dirty dquots
	 */
	dquot_writeback_dquots(sb, -1);

	if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
		if (wait)
			log_wait_commit(EXT3_SB(sb)->s_journal, target);
	}
	return 0;
}

/*
 * LVM calls this function before a (read-only) snapshot is created.  This
 * gives us a chance to flush the journal completely and mark the fs clean.
 */
static int ext3_freeze(struct super_block *sb)
{
	int error = 0;
	journal_t *journal;

	if (!(sb->s_flags & MS_RDONLY)) {
		journal = EXT3_SB(sb)->s_journal;

		/* Now we set up the journal barrier. */
		journal_lock_updates(journal);

		/*
		 * We don't want to clear needs_recovery flag when we failed
		 * to flush the journal.
		 */
		error = journal_flush(journal);
		if (error < 0)
			goto out;

		/* Journal blocked and flushed, clear needs_recovery flag.
*/ EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); error = ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1); if (error) goto out; } return 0; out: journal_unlock_updates(journal); return error; } /* * Called by LVM after the snapshot is done. We need to reset the RECOVER * flag here, even though the filesystem is not technically dirty yet. */ static int ext3_unfreeze(struct super_block *sb) { if (!(sb->s_flags & MS_RDONLY)) { /* Reser the needs_recovery flag before the fs is unlocked. */ EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1); journal_unlock_updates(EXT3_SB(sb)->s_journal); } return 0; } static int ext3_remount (struct super_block * sb, int * flags, char * data) { struct ext3_super_block * es; struct ext3_sb_info *sbi = EXT3_SB(sb); ext3_fsblk_t n_blocks_count = 0; unsigned long old_sb_flags; struct ext3_mount_options old_opts; int enable_quota = 0; int err; #ifdef CONFIG_QUOTA int i; #endif /* Store the original options */ old_sb_flags = sb->s_flags; old_opts.s_mount_opt = sbi->s_mount_opt; old_opts.s_resuid = sbi->s_resuid; old_opts.s_resgid = sbi->s_resgid; old_opts.s_commit_interval = sbi->s_commit_interval; #ifdef CONFIG_QUOTA old_opts.s_jquota_fmt = sbi->s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) if (sbi->s_qf_names[i]) { old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i], GFP_KERNEL); if (!old_opts.s_qf_names[i]) { int j; for (j = 0; j < i; j++) kfree(old_opts.s_qf_names[j]); return -ENOMEM; } } else old_opts.s_qf_names[i] = NULL; #endif /* * Allow the "check" option to be passed as a remount option. */ if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) { err = -EINVAL; goto restore_opts; } if (test_opt(sb, ABORT)) ext3_abort(sb, __func__, "Abort forced by user"); sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sb, POSIX_ACL) ? 
MS_POSIXACL : 0); es = sbi->s_es; ext3_init_journal_params(sb, sbi->s_journal); if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) || n_blocks_count > le32_to_cpu(es->s_blocks_count)) { if (test_opt(sb, ABORT)) { err = -EROFS; goto restore_opts; } if (*flags & MS_RDONLY) { err = dquot_suspend(sb, -1); if (err < 0) goto restore_opts; /* * First of all, the unconditional stuff we have to do * to disable replay of the journal when we next remount */ sb->s_flags |= MS_RDONLY; /* * OK, test if we are remounting a valid rw partition * readonly, and if so set the rdonly flag and then * mark the partition as valid again. */ if (!(es->s_state & cpu_to_le16(EXT3_VALID_FS)) && (sbi->s_mount_state & EXT3_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); ext3_mark_recovery_complete(sb, es); } else { __le32 ret; if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP))) { ext3_msg(sb, KERN_WARNING, "warning: couldn't remount RDWR " "because of unsupported optional " "features (%x)", le32_to_cpu(ret)); err = -EROFS; goto restore_opts; } /* * If we have an unprocessed orphan list hanging * around from a previously readonly bdev mount, * require a full umount & mount for now. */ if (es->s_last_orphan) { ext3_msg(sb, KERN_WARNING, "warning: couldn't " "remount RDWR because of unprocessed " "orphan inode list. Please " "umount & mount instead."); err = -EINVAL; goto restore_opts; } /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) 
*/ ext3_clear_journal_err(sb, es); sbi->s_mount_state = le16_to_cpu(es->s_state); if ((err = ext3_group_extend(sb, es, n_blocks_count))) goto restore_opts; if (!ext3_setup_super (sb, es, 0)) sb->s_flags &= ~MS_RDONLY; enable_quota = 1; } } #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < MAXQUOTAS; i++) kfree(old_opts.s_qf_names[i]); #endif if (enable_quota) dquot_resume(sb, -1); return 0; restore_opts: sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; sbi->s_resuid = old_opts.s_resuid; sbi->s_resgid = old_opts.s_resgid; sbi->s_commit_interval = old_opts.s_commit_interval; #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { kfree(sbi->s_qf_names[i]); sbi->s_qf_names[i] = old_opts.s_qf_names[i]; } #endif return err; } static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf) { struct super_block *sb = dentry->d_sb; struct ext3_sb_info *sbi = EXT3_SB(sb); struct ext3_super_block *es = sbi->s_es; u64 fsid; if (test_opt(sb, MINIX_DF)) { sbi->s_overhead_last = 0; } else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) { unsigned long ngroups = sbi->s_groups_count, i; ext3_fsblk_t overhead = 0; smp_rmb(); /* * Compute the overhead (FS structures). This is constant * for a given filesystem unless the number of block groups * changes so we cache the previous value until it does. */ /* * All of the blocks before first_data_block are * overhead */ overhead = le32_to_cpu(es->s_first_data_block); /* * Add the overhead attributed to the superblock and * block group descriptors. If the sparse superblocks * feature is turned on, then not all groups have this. */ for (i = 0; i < ngroups; i++) { overhead += ext3_bg_has_super(sb, i) + ext3_bg_num_gdb(sb, i); cond_resched(); } /* * Every block group has an inode bitmap, a block * bitmap, and an inode table. 
*/ overhead += ngroups * (2 + sbi->s_itb_per_group); sbi->s_overhead_last = overhead; smp_wmb(); sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count); } buf->f_type = EXT3_SUPER_MAGIC; buf->f_bsize = sb->s_blocksize; buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last; buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter); buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count); if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count)) buf->f_bavail = 0; buf->f_files = le32_to_cpu(es->s_inodes_count); buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter); buf->f_namelen = EXT3_NAME_LEN; fsid = le64_to_cpup((void *)es->s_uuid) ^ le64_to_cpup((void *)es->s_uuid + sizeof(u64)); buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL; buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL; return 0; } /* Helper function for writing quotas on sync - we need to start transaction before quota file * is locked for write. Otherwise the are possible deadlocks: * Process 1 Process 2 * ext3_create() quota_sync() * journal_start() write_dquot() * dquot_initialize() down(dqio_mutex) * down(dqio_mutex) journal_start() * */ #ifdef CONFIG_QUOTA static inline struct inode *dquot_to_inode(struct dquot *dquot) { return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type]; } static int ext3_write_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; struct inode *inode; inode = dquot_to_inode(dquot); handle = ext3_journal_start(inode, EXT3_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_commit(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int ext3_acquire_dquot(struct dquot *dquot) { int ret, err; handle_t *handle; handle = ext3_journal_start(dquot_to_inode(dquot), EXT3_QUOTA_INIT_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); ret = dquot_acquire(dquot); err = ext3_journal_stop(handle); if (!ret) ret = err; return ret; } static int 
ext3_release_dquot(struct dquot *dquot)
{
	int ret, err;
	handle_t *handle;

	handle = ext3_journal_start(dquot_to_inode(dquot),
				    EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb));
	if (IS_ERR(handle)) {
		/* Release dquot anyway to avoid endless cycle in dqput() */
		dquot_release(dquot);
		return PTR_ERR(handle);
	}
	ret = dquot_release(dquot);
	err = ext3_journal_stop(handle);
	/* Prefer the dquot error; otherwise report the journal-stop error. */
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Mark a dquot dirty.  For journalled quota the update is also written
 * out immediately so it lands in the journal.
 */
static int ext3_mark_dquot_dirty(struct dquot *dquot)
{
	/* Are we journaling quotas? */
	if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
	    EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
		dquot_mark_dquot_dirty(dquot);
		return ext3_write_dquot(dquot);
	} else {
		return dquot_mark_dquot_dirty(dquot);
	}
}

/*
 * Commit quota-format info for @type inside a small journal transaction.
 */
static int ext3_write_info(struct super_block *sb, int type)
{
	int ret, err;
	handle_t *handle;

	/* Data block + inode block */
	handle = ext3_journal_start(sb->s_root->d_inode, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = dquot_commit_info(sb, type);
	err = ext3_journal_stop(handle);
	if (!ret)
		ret = err;
	return ret;
}

/*
 * Turn on quotas during mount time - we need to find
 * the quota file and such...
 */
static int ext3_quota_on_mount(struct super_block *sb, int type)
{
	return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type],
					EXT3_SB(sb)->s_jquota_fmt, type);
}

/*
 * Standard function to be called on quota_on
 */
static int ext3_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path)
{
	int err;

	if (!test_opt(sb, QUOTA))
		return -EINVAL;

	/* Quotafile not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		return -EXDEV;
	/* Journaling quota? */
	if (EXT3_SB(sb)->s_qf_names[type]) {
		/* Quotafile not of fs root? */
		if (path->dentry->d_parent != sb->s_root)
			ext3_msg(sb, KERN_WARNING,
				"warning: Quota file not on filesystem root. "
				"Journaled quota will not work.");
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see
	 * all updates to the file when we bypass pagecache...
*/ if (ext3_should_journal_data(path->dentry->d_inode)) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... */ journal_lock_updates(EXT3_SB(sb)->s_journal); err = journal_flush(EXT3_SB(sb)->s_journal); journal_unlock_updates(EXT3_SB(sb)->s_journal); if (err) return err; } return dquot_quota_on(sb, type, format_id, path); } /* Read data from quotafile - avoid pagecache and such because we cannot afford * acquiring the locks... As quota files are never truncated and quota code * itself serializes the operations (and no one else should touch the files) * we don't have to be afraid of races */ static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int tocopy; size_t toread; struct buffer_head *bh; loff_t i_size = i_size_read(inode); if (off > i_size) return 0; if (off+len > i_size) len = i_size-off; toread = len; while (toread > 0) { tocopy = sb->s_blocksize - offset < toread ? sb->s_blocksize - offset : toread; bh = ext3_bread(NULL, inode, blk, 0, &err); if (err) return err; if (!bh) /* A hole? 
*/ memset(data, 0, tocopy); else memcpy(data, bh->b_data+offset, tocopy); brelse(bh); offset = 0; toread -= tocopy; data += tocopy; blk++; } return len; } /* Write to quotafile (we know the transaction is already started and has * enough credits) */ static ssize_t ext3_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off) { struct inode *inode = sb_dqopt(sb)->files[type]; sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb); int err = 0; int offset = off & (sb->s_blocksize - 1); int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL; struct buffer_head *bh; handle_t *handle = journal_current_handle(); if (!handle) { ext3_msg(sb, KERN_WARNING, "warning: quota write (off=%llu, len=%llu)" " cancelled because transaction is not started.", (unsigned long long)off, (unsigned long long)len); return -EIO; } /* * Since we account only one data block in transaction credits, * then it is impossible to cross a block boundary. */ if (sb->s_blocksize - offset < len) { ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)" " cancelled because not block aligned", (unsigned long long)off, (unsigned long long)len); return -EIO; } bh = ext3_bread(handle, inode, blk, 1, &err); if (!bh) goto out; if (journal_quota) { err = ext3_journal_get_write_access(handle, bh); if (err) { brelse(bh); goto out; } } lock_buffer(bh); memcpy(bh->b_data+offset, data, len); flush_dcache_page(bh->b_page); unlock_buffer(bh); if (journal_quota) err = ext3_journal_dirty_metadata(handle, bh); else { /* Always do at least ordered writes for quotas */ err = ext3_journal_dirty_data(handle, bh); mark_buffer_dirty(bh); } brelse(bh); out: if (err) return err; if (inode->i_size < off + len) { i_size_write(inode, off + len); EXT3_I(inode)->i_disksize = inode->i_size; } inode->i_version++; inode->i_mtime = inode->i_ctime = CURRENT_TIME; ext3_mark_inode_dirty(handle, inode); return len; } #endif static struct dentry *ext3_mount(struct file_system_type *fs_type, int flags, const 
char *dev_name, void *data)
{
	/* ext3 mounts a plain block device; hand off to ext3_fill_super(). */
	return mount_bdev(fs_type, flags, dev_name, data, ext3_fill_super);
}

/* VFS registration record for the "ext3" filesystem type. */
static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext3_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext3");

/*
 * Module init: bring up the xattr subsystem and the inode cache, then
 * register the filesystem type.  Each error path unwinds what succeeded.
 */
static int __init init_ext3_fs(void)
{
	int err = init_ext3_xattr();
	if (err)
		return err;
	err = init_inodecache();
	if (err)
		goto out1;
	err = register_filesystem(&ext3_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
out1:
	exit_ext3_xattr();
	return err;
}

/* Module exit: undo init_ext3_fs() in reverse order. */
static void __exit exit_ext3_fs(void)
{
	unregister_filesystem(&ext3_fs_type);
	destroy_inodecache();
	exit_ext3_xattr();
}

MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
MODULE_LICENSE("GPL");
module_init(init_ext3_fs)
module_exit(exit_ext3_fs)
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5604_0
crossvul-cpp_data_bad_4699_0
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* * memcached - memory caching daemon * * http://www.danga.com/memcached/ * * Copyright 2003 Danga Interactive, Inc. All rights reserved. * * Use and distribution licensed under the BSD license. See * the LICENSE file for full text. * * Authors: * Anatoly Vorobey <mellon@pobox.com> * Brad Fitzpatrick <brad@danga.com> */ #include "memcached.h" #include <sys/stat.h> #include <sys/socket.h> #include <sys/un.h> #include <signal.h> #include <sys/resource.h> #include <sys/uio.h> #include <ctype.h> #include <stdarg.h> /* some POSIX systems need the following definition * to get mlockall flags out of sys/mman.h. */ #ifndef _P1003_1B_VISIBLE #define _P1003_1B_VISIBLE #endif /* need this to get IOV_MAX on some platforms. */ #ifndef __need_IOV_MAX #define __need_IOV_MAX #endif #include <pwd.h> #include <sys/mman.h> #include <fcntl.h> #include <netinet/tcp.h> #include <arpa/inet.h> #include <errno.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <assert.h> #include <limits.h> #include <sysexits.h> #include <stddef.h> /* FreeBSD 4.x doesn't have IOV_MAX exposed. 
*/ #ifndef IOV_MAX #if defined(__FreeBSD__) || defined(__APPLE__) # define IOV_MAX 1024 #endif #endif /* * forward declarations */ static void drive_machine(conn *c); static int new_socket(struct addrinfo *ai); static int try_read_command(conn *c); enum try_read_result { READ_DATA_RECEIVED, READ_NO_DATA_RECEIVED, READ_ERROR, /** an error occured (on the socket) (or client closed connection) */ READ_MEMORY_ERROR /** failed to allocate more memory */ }; static enum try_read_result try_read_network(conn *c); static enum try_read_result try_read_udp(conn *c); static void conn_set_state(conn *c, enum conn_states state); /* stats */ static void stats_init(void); static void server_stats(ADD_STAT add_stats, conn *c); static void process_stat_settings(ADD_STAT add_stats, void *c); /* defaults */ static void settings_init(void); /* event handling, network IO */ static void event_handler(const int fd, const short which, void *arg); static void conn_close(conn *c); static void conn_init(void); static bool update_event(conn *c, const int new_flags); static void complete_nread(conn *c); static void process_command(conn *c, char *command); static void write_and_free(conn *c, char *buf, int bytes); static int ensure_iov_space(conn *c); static int add_iov(conn *c, const void *buf, int len); static int add_msghdr(conn *c); /* time handling */ static void set_current_time(void); /* update the global variable holding global 32-bit seconds-since-start time (to avoid 64 bit time_t) */ static void conn_free(conn *c); /** exported globals **/ struct stats stats; struct settings settings; time_t process_started; /* when the process was started */ /** file scope variables **/ static conn *listen_conn = NULL; static struct event_base *main_base; enum transmit_result { TRANSMIT_COMPLETE, /** All done writing. */ TRANSMIT_INCOMPLETE, /** More data remaining to write. */ TRANSMIT_SOFT_ERROR, /** Can't write any more right now. 
 */
    TRANSMIT_HARD_ERROR   /** Can't write (c->state is set to conn_closing) */
};

static enum transmit_result transmit(conn *c);

/* Largest legal relative expiration: 30 days, in seconds. */
#define REALTIME_MAXDELTA 60*60*24*30

/*
 * given time value that's either unix time or delta from current unix time, return
 * unix time. Use the fact that delta can't exceed one month (and real time value can't
 * be that low).
 */
static rel_time_t realtime(const time_t exptime) {
    /* no. of seconds in 30 days - largest possible delta exptime */

    if (exptime == 0) return 0; /* 0 means never expire */

    if (exptime > REALTIME_MAXDELTA) {
        /* if item expiration is at/before the server started, give it an
           expiration time of 1 second after the server started.
           (because 0 means don't expire).  without this, we'd
           underflow and wrap around to some large value way in the
           future, effectively making items expiring in the past
           really expiring never */
        if (exptime <= process_started)
            return (rel_time_t)1;
        /* absolute unix time: store as seconds since server start */
        return (rel_time_t)(exptime - process_started);
    } else {
        /* relative delta: anchor to the server's current clock tick */
        return (rel_time_t)(exptime + current_time);
    }
}

/* Zero all global counters and record the (offset) process start time. */
static void stats_init(void) {
    stats.curr_items = stats.total_items = stats.curr_conns = stats.total_conns = stats.conn_structs = 0;
    stats.get_cmds = stats.set_cmds = stats.get_hits = stats.get_misses = stats.evictions = 0;
    stats.curr_bytes = stats.listen_disabled_num = 0;
    stats.accepting_conns = true; /* assuming we start in this state. */

    /* make the time we started always be 2 seconds before we really
       did, so time(0) - time.started is never zero. if so, things
       like 'settings.oldest_live' which act as booleans as well as
       values are now false in boolean context...
*/ process_started = time(0) - 2; stats_prefix_init(); } static void stats_reset(void) { STATS_LOCK(); stats.total_items = stats.total_conns = 0; stats.evictions = 0; stats.listen_disabled_num = 0; stats_prefix_clear(); STATS_UNLOCK(); threadlocal_stats_reset(); item_stats_reset(); } static void settings_init(void) { settings.use_cas = true; settings.access = 0700; settings.port = 11211; settings.udpport = 11211; /* By default this string should be NULL for getaddrinfo() */ settings.inter = NULL; settings.maxbytes = 64 * 1024 * 1024; /* default is 64MB */ settings.maxconns = 1024; /* to limit connections-related memory to about 5MB */ settings.verbose = 0; settings.oldest_live = 0; settings.evict_to_free = 1; /* push old items out of cache when memory runs out */ settings.socketpath = NULL; /* by default, not using a unix socket */ settings.factor = 1.25; settings.chunk_size = 48; /* space for a modest key and value */ settings.num_threads = 4; /* N workers */ settings.prefix_delimiter = ':'; settings.detail_enabled = 0; settings.reqs_per_event = 20; settings.backlog = 1024; settings.binding_protocol = negotiating_prot; settings.item_size_max = 1024 * 1024; /* The famous 1MB upper limit. */ } /* * Adds a message header to a connection. * * Returns 0 on success, -1 on out-of-memory. */ static int add_msghdr(conn *c) { struct msghdr *msg; assert(c != NULL); if (c->msgsize == c->msgused) { msg = realloc(c->msglist, c->msgsize * 2 * sizeof(struct msghdr)); if (! msg) return -1; c->msglist = msg; c->msgsize *= 2; } msg = c->msglist + c->msgused; /* this wipes msg_iovlen, msg_control, msg_controllen, and msg_flags, the last 3 of which aren't defined on solaris: */ memset(msg, 0, sizeof(struct msghdr)); msg->msg_iov = &c->iov[c->iovused]; if (c->request_addr_size > 0) { msg->msg_name = &c->request_addr; msg->msg_namelen = c->request_addr_size; } c->msgbytes = 0; c->msgused++; if (IS_UDP(c->transport)) { /* Leave room for the UDP header, which we'll fill in later. 
*/
        return add_iov(c, NULL, UDP_HEADER_SIZE);
    }

    return 0;
}

/*
 * Free list management for connections.
 */
static conn **freeconns;
static int freetotal;
static int freecurr;
/* Lock for connection freelist */
static pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;

/* One-time setup of the connection freelist.
 * NOTE(review): on calloc failure this only logs and leaves freeconns NULL
 * while freetotal stays 200 — a later conn_add_to_freelist() would then
 * write through a NULL pointer.  Worth hardening (set freetotal = 0 on
 * failure) — TODO confirm intended startup-OOM policy. */
static void conn_init(void) {
    freetotal = 200;
    freecurr = 0;
    if ((freeconns = calloc(freetotal, sizeof(conn *))) == NULL) {
        fprintf(stderr, "Failed to allocate connection structures\n");
    }
    return;
}

/*
 * Returns a connection from the freelist, if any.
 */
conn *conn_from_freelist() {
    conn *c;

    pthread_mutex_lock(&conn_lock);
    if (freecurr > 0) {
        c = freeconns[--freecurr];
    } else {
        c = NULL;
    }
    pthread_mutex_unlock(&conn_lock);

    return c;
}

/*
 * Adds a connection to the freelist. 0 = success.
 * NOTE: return semantics are inverted relative to the name — false means
 * the connection WAS parked on the freelist, true means the caller must
 * free it itself.
 */
bool conn_add_to_freelist(conn *c) {
    bool ret = true;
    pthread_mutex_lock(&conn_lock);
    if (freecurr < freetotal) {
        freeconns[freecurr++] = c;
        ret = false;
    } else {
        /* try to enlarge free connections array */
        size_t newsize = freetotal * 2;
        conn **new_freeconns = realloc(freeconns, sizeof(conn *) * newsize);
        if (new_freeconns) {
            freetotal = newsize;
            freeconns = new_freeconns;
            freeconns[freecurr++] = c;
            ret = false;
        }
    }
    pthread_mutex_unlock(&conn_lock);
    return ret;
}

/* Human-readable name for a wire protocol, for verbose logging. */
static const char *prot_text(enum protocol prot) {
    char *rv = "unknown";
    switch(prot) {
        case ascii_prot:
            rv = "ascii";
            break;
        case binary_prot:
            rv = "binary";
            break;
        case negotiating_prot:
            rv = "auto-negotiate";
            break;
    }
    return rv;
}

/*
 * Allocate (or recycle from the freelist) and fully initialize a conn for
 * socket sfd, register its libevent handler on base, and account for it in
 * the global stats.  Returns NULL on allocation or event registration
 * failure.
 */
conn *conn_new(const int sfd, enum conn_states init_state,
                const int event_flags,
                const int read_buffer_size, enum network_transport transport,
                struct event_base *base) {
    conn *c = conn_from_freelist();

    if (NULL == c) {
        /* No recycled conn available: build one from scratch with all of
         * its per-connection scratch buffers. */
        if (!(c = (conn *)calloc(1, sizeof(conn)))) {
            fprintf(stderr, "calloc()\n");
            return NULL;
        }
        MEMCACHED_CONN_CREATE(c);

        c->rbuf = c->wbuf = 0;
        c->ilist = 0;
        c->suffixlist = 0;
        c->iov = 0;
        c->msglist = 0;
        c->hdrbuf = 0;

        c->rsize = read_buffer_size;
        c->wsize = DATA_BUFFER_SIZE;
        c->isize = ITEM_LIST_INITIAL;
        c->suffixsize = SUFFIX_LIST_INITIAL;
        c->iovsize = IOV_LIST_INITIAL;
        c->msgsize = MSG_LIST_INITIAL;
        c->hdrsize = 0;

        c->rbuf = (char *)malloc((size_t)c->rsize);
        c->wbuf = (char *)malloc((size_t)c->wsize);
        c->ilist = (item **)malloc(sizeof(item *) * c->isize);
        c->suffixlist = (char **)malloc(sizeof(char *) * c->suffixsize);
        c->iov = (struct iovec *)malloc(sizeof(struct iovec) * c->iovsize);
        c->msglist = (struct msghdr *)malloc(sizeof(struct msghdr) * c->msgsize);

        /* conn_free() tolerates partially-allocated conns, so one cleanup
         * path covers any of these failing. */
        if (c->rbuf == 0 || c->wbuf == 0 || c->ilist == 0 || c->iov == 0 ||
                c->msglist == 0 || c->suffixlist == 0) {
            conn_free(c);
            fprintf(stderr, "malloc()\n");
            return NULL;
        }

        STATS_LOCK();
        stats.conn_structs++;
        STATS_UNLOCK();
    }

    c->transport = transport;
    c->protocol = settings.binding_protocol;

    /* unix socket mode doesn't need this, so zeroed out.  but why
     * is this done for every command?  presumably for UDP
     * mode.  */
    if (!settings.socketpath) {
        c->request_addr_size = sizeof(c->request_addr);
    } else {
        c->request_addr_size = 0;
    }

    if (settings.verbose > 1) {
        if (init_state == conn_listening) {
            fprintf(stderr, "<%d server listening (%s)\n", sfd,
                prot_text(c->protocol));
        } else if (IS_UDP(transport)) {
            fprintf(stderr, "<%d server listening (udp)\n", sfd);
        } else if (c->protocol == negotiating_prot) {
            fprintf(stderr, "<%d new auto-negotiating client connection\n",
                    sfd);
        } else if (c->protocol == ascii_prot) {
            fprintf(stderr, "<%d new ascii client connection.\n", sfd);
        } else if (c->protocol == binary_prot) {
            fprintf(stderr, "<%d new binary client connection.\n", sfd);
        } else {
            fprintf(stderr, "<%d new unknown (%d) client connection\n",
                sfd, c->protocol);
            assert(false);
        }
    }

    /* Reset all per-request cursors/counters — essential for conns that
     * came back from the freelist. */
    c->sfd = sfd;
    c->state = init_state;
    c->rlbytes = 0;
    c->cmd = -1;
    c->rbytes = c->wbytes = 0;
    c->wcurr = c->wbuf;
    c->rcurr = c->rbuf;
    c->ritem = 0;
    c->icurr = c->ilist;
    c->suffixcurr = c->suffixlist;
    c->ileft = 0;
    c->suffixleft = 0;
    c->iovused = 0;
    c->msgcurr = 0;
    c->msgused = 0;

    c->write_and_go = init_state;
    c->write_and_free = 0;
    c->item = 0;
    c->noreply = false;

    event_set(&c->event, sfd, event_flags, event_handler, (void *)c);
    event_base_set(base, &c->event);
    c->ev_flags = event_flags;

    if (event_add(&c->event, 0) == -1) {
        /* Could not register with libevent: park or free the conn. */
        if (conn_add_to_freelist(c)) {
            conn_free(c);
        }
        perror("event_add");
        return NULL;
    }

    STATS_LOCK();
    stats.curr_conns++;
    stats.total_conns++;
    STATS_UNLOCK();

    MEMCACHED_CONN_ALLOCATE(c->sfd);

    return c;
}

/* Drop every reference a conn still holds (items, suffixes, pending write
 * buffer, SASL context) so it can be closed or recycled. */
static void conn_cleanup(conn *c) {
    assert(c != NULL);

    if (c->item) {
        item_remove(c->item);
        c->item = 0;
    }

    if (c->ileft != 0) {
        for (; c->ileft > 0; c->ileft--,c->icurr++) {
            item_remove(*(c->icurr));
        }
    }

    if (c->suffixleft != 0) {
        for (; c->suffixleft > 0; c->suffixleft--, c->suffixcurr++) {
            cache_free(c->thread->suffix_cache, *(c->suffixcurr));
        }
    }

    if (c->write_and_free) {
        free(c->write_and_free);
        c->write_and_free = 0;
    }

    if (c->sasl_conn) {
        assert(settings.sasl);
        sasl_dispose(&c->sasl_conn);
        c->sasl_conn = NULL;
    }
}

/*
 * Frees a connection.
 */
void conn_free(conn *c) {
    if (c) {
        MEMCACHED_CONN_DESTROY(c);
        if (c->hdrbuf)
            free(c->hdrbuf);
        if (c->msglist)
            free(c->msglist);
        if (c->rbuf)
            free(c->rbuf);
        if (c->wbuf)
            free(c->wbuf);
        if (c->ilist)
            free(c->ilist);
        if (c->suffixlist)
            free(c->suffixlist);
        if (c->iov)
            free(c->iov);
        free(c);
    }
}

/* Tear down a live connection: unregister its event, close the socket,
 * release held resources, and recycle or free the struct. */
static void conn_close(conn *c) {
    assert(c != NULL);

    /* delete the event, the socket and the conn */
    event_del(&c->event);

    if (settings.verbose > 1)
        fprintf(stderr, "<%d connection closed.\n", c->sfd);

    MEMCACHED_CONN_RELEASE(c->sfd);
    close(c->sfd);
    /* A slot just opened up; re-enable accepting if it was suspended. */
    accept_new_conns(true);
    conn_cleanup(c);

    /* if the connection has big buffers, just free it */
    if (c->rsize > READ_BUFFER_HIGHWAT || conn_add_to_freelist(c)) {
        conn_free(c);
    }

    STATS_LOCK();
    stats.curr_conns--;
    STATS_UNLOCK();

    return;
}

/*
 * Shrinks a connection's buffers if they're too big.  This prevents
 * periodic large "get" requests from permanently chewing lots of server
 * memory.
 *
 * This should only be called in between requests since it can wipe output
 * buffers!
*/ static void conn_shrink(conn *c) { assert(c != NULL); if (IS_UDP(c->transport)) return; if (c->rsize > READ_BUFFER_HIGHWAT && c->rbytes < DATA_BUFFER_SIZE) { char *newbuf; if (c->rcurr != c->rbuf) memmove(c->rbuf, c->rcurr, (size_t)c->rbytes); newbuf = (char *)realloc((void *)c->rbuf, DATA_BUFFER_SIZE); if (newbuf) { c->rbuf = newbuf; c->rsize = DATA_BUFFER_SIZE; } /* TODO check other branch... */ c->rcurr = c->rbuf; } if (c->isize > ITEM_LIST_HIGHWAT) { item **newbuf = (item**) realloc((void *)c->ilist, ITEM_LIST_INITIAL * sizeof(c->ilist[0])); if (newbuf) { c->ilist = newbuf; c->isize = ITEM_LIST_INITIAL; } /* TODO check error condition? */ } if (c->msgsize > MSG_LIST_HIGHWAT) { struct msghdr *newbuf = (struct msghdr *) realloc((void *)c->msglist, MSG_LIST_INITIAL * sizeof(c->msglist[0])); if (newbuf) { c->msglist = newbuf; c->msgsize = MSG_LIST_INITIAL; } /* TODO check error condition? */ } if (c->iovsize > IOV_LIST_HIGHWAT) { struct iovec *newbuf = (struct iovec *) realloc((void *)c->iov, IOV_LIST_INITIAL * sizeof(c->iov[0])); if (newbuf) { c->iov = newbuf; c->iovsize = IOV_LIST_INITIAL; } /* TODO check return value */ } } /** * Convert a state name to a human readable form. */ static const char *state_text(enum conn_states state) { const char* const statenames[] = { "conn_listening", "conn_new_cmd", "conn_waiting", "conn_read", "conn_parse_cmd", "conn_write", "conn_nread", "conn_swallow", "conn_closing", "conn_mwrite" }; return statenames[state]; } /* * Sets a connection's current state in the state machine. Any special * processing that needs to happen on certain state transitions can * happen here. 
*/ static void conn_set_state(conn *c, enum conn_states state) { assert(c != NULL); assert(state >= conn_listening && state < conn_max_state); if (state != c->state) { if (settings.verbose > 2) { fprintf(stderr, "%d: going from %s to %s\n", c->sfd, state_text(c->state), state_text(state)); } c->state = state; if (state == conn_write || state == conn_mwrite) { MEMCACHED_PROCESS_COMMAND_END(c->sfd, c->wbuf, c->wbytes); } } } /* * Ensures that there is room for another struct iovec in a connection's * iov list. * * Returns 0 on success, -1 on out-of-memory. */ static int ensure_iov_space(conn *c) { assert(c != NULL); if (c->iovused >= c->iovsize) { int i, iovnum; struct iovec *new_iov = (struct iovec *)realloc(c->iov, (c->iovsize * 2) * sizeof(struct iovec)); if (! new_iov) return -1; c->iov = new_iov; c->iovsize *= 2; /* Point all the msghdr structures at the new list. */ for (i = 0, iovnum = 0; i < c->msgused; i++) { c->msglist[i].msg_iov = &c->iov[iovnum]; iovnum += c->msglist[i].msg_iovlen; } } return 0; } /* * Adds data to the list of pending data that will be written out to a * connection. * * Returns 0 on success, -1 on out-of-memory. */ static int add_iov(conn *c, const void *buf, int len) { struct msghdr *m; int leftover; bool limit_to_mtu; assert(c != NULL); do { m = &c->msglist[c->msgused - 1]; /* * Limit UDP packets, and the first payloads of TCP replies, to * UDP_MAX_PAYLOAD_SIZE bytes. */ limit_to_mtu = IS_UDP(c->transport) || (1 == c->msgused); /* We may need to start a new msghdr if this one is full. 
*/ if (m->msg_iovlen == IOV_MAX || (limit_to_mtu && c->msgbytes >= UDP_MAX_PAYLOAD_SIZE)) { add_msghdr(c); m = &c->msglist[c->msgused - 1]; } if (ensure_iov_space(c) != 0) return -1; /* If the fragment is too big to fit in the datagram, split it up */ if (limit_to_mtu && len + c->msgbytes > UDP_MAX_PAYLOAD_SIZE) { leftover = len + c->msgbytes - UDP_MAX_PAYLOAD_SIZE; len -= leftover; } else { leftover = 0; } m = &c->msglist[c->msgused - 1]; m->msg_iov[m->msg_iovlen].iov_base = (void *)buf; m->msg_iov[m->msg_iovlen].iov_len = len; c->msgbytes += len; c->iovused++; m->msg_iovlen++; buf = ((char *)buf) + len; len = leftover; } while (leftover > 0); return 0; } /* * Constructs a set of UDP headers and attaches them to the outgoing messages. */ static int build_udp_headers(conn *c) { int i; unsigned char *hdr; assert(c != NULL); if (c->msgused > c->hdrsize) { void *new_hdrbuf; if (c->hdrbuf) new_hdrbuf = realloc(c->hdrbuf, c->msgused * 2 * UDP_HEADER_SIZE); else new_hdrbuf = malloc(c->msgused * 2 * UDP_HEADER_SIZE); if (! new_hdrbuf) return -1; c->hdrbuf = (unsigned char *)new_hdrbuf; c->hdrsize = c->msgused * 2; } hdr = c->hdrbuf; for (i = 0; i < c->msgused; i++) { c->msglist[i].msg_iov[0].iov_base = (void*)hdr; c->msglist[i].msg_iov[0].iov_len = UDP_HEADER_SIZE; *hdr++ = c->request_id / 256; *hdr++ = c->request_id % 256; *hdr++ = i / 256; *hdr++ = i % 256; *hdr++ = c->msgused / 256; *hdr++ = c->msgused % 256; *hdr++ = 0; *hdr++ = 0; assert((void *) hdr == (caddr_t)c->msglist[i].msg_iov[0].iov_base + UDP_HEADER_SIZE); } return 0; } static void out_string(conn *c, const char *str) { size_t len; assert(c != NULL); if (c->noreply) { if (settings.verbose > 1) fprintf(stderr, ">%d NOREPLY %s\n", c->sfd, str); c->noreply = false; conn_set_state(c, conn_new_cmd); return; } if (settings.verbose > 1) fprintf(stderr, ">%d %s\n", c->sfd, str); len = strlen(str); if ((len + 2) > c->wsize) { /* ought to be always enough. 
just fail for simplicity */
        str = "SERVER_ERROR output line too long";
        len = strlen(str);
    }

    memcpy(c->wbuf, str, len);
    memcpy(c->wbuf + len, "\r\n", 2);
    c->wbytes = len + 2;
    c->wcurr = c->wbuf;

    conn_set_state(c, conn_write);
    c->write_and_go = conn_new_cmd;
    return;
}

/*
 * we get here after reading the value in set/add/replace commands. The command
 * has been stored in c->cmd, and the item is ready in c->item.
 */
static void complete_nread_ascii(conn *c) {
    assert(c != NULL);

    item *it = c->item;
    int comm = c->cmd;
    enum store_item_type ret;

    pthread_mutex_lock(&c->thread->stats.mutex);
    c->thread->stats.slab_stats[it->slabs_clsid].set_cmds++;
    pthread_mutex_unlock(&c->thread->stats.mutex);

    /* Every ASCII value must arrive terminated by "\r\n"; anything else is
     * a short or garbled data chunk from the client. */
    if (strncmp(ITEM_data(it) + it->nbytes - 2, "\r\n", 2) != 0) {
        out_string(c, "CLIENT_ERROR bad data chunk");
    } else {
      ret = store_item(it, comm, c);

#ifdef ENABLE_DTRACE
      uint64_t cas = ITEM_get_cas(it);
      switch (c->cmd) {
      case NREAD_ADD:
          MEMCACHED_COMMAND_ADD(c->sfd, ITEM_key(it), it->nkey,
                                (ret == 1) ? it->nbytes : -1, cas);
          break;
      case NREAD_REPLACE:
          MEMCACHED_COMMAND_REPLACE(c->sfd, ITEM_key(it), it->nkey,
                                    (ret == 1) ? it->nbytes : -1, cas);
          break;
      case NREAD_APPEND:
          MEMCACHED_COMMAND_APPEND(c->sfd, ITEM_key(it), it->nkey,
                                   (ret == 1) ? it->nbytes : -1, cas);
          break;
      case NREAD_PREPEND:
          MEMCACHED_COMMAND_PREPEND(c->sfd, ITEM_key(it), it->nkey,
                                    (ret == 1) ? it->nbytes : -1, cas);
          break;
      case NREAD_SET:
          MEMCACHED_COMMAND_SET(c->sfd, ITEM_key(it), it->nkey,
                                (ret == 1) ? it->nbytes : -1, cas);
          break;
      case NREAD_CAS:
          MEMCACHED_COMMAND_CAS(c->sfd, ITEM_key(it), it->nkey, it->nbytes,
                                cas);
          break;
      }
#endif

      switch (ret) {
      case STORED:
          out_string(c, "STORED");
          break;
      case EXISTS:
          out_string(c, "EXISTS");
          break;
      case NOT_FOUND:
          out_string(c, "NOT_FOUND");
          break;
      case NOT_STORED:
          out_string(c, "NOT_STORED");
          break;
      default:
          out_string(c, "SERVER_ERROR Unhandled storage type.");
      }

    }

    item_remove(c->item);       /* release the c->item reference */
    c->item = 0;
}

/**
 * get a pointer to the start of the request struct for the current command
 */
static void* binary_get_request(conn *c) {
    /* rcurr sits just past header+extras+key; back up over all three. */
    char *ret = c->rcurr;
    ret -= (sizeof(c->binary_header) + c->binary_header.request.keylen +
            c->binary_header.request.extlen);

    assert(ret >= c->rbuf);
    return ret;
}

/**
 * get a pointer to the key in this request
 */
static char* binary_get_key(conn *c) {
    return c->rcurr - (c->binary_header.request.keylen);
}

/* Build a binary-protocol response header in c->wbuf and queue it for
 * transmission; resets the connection's pending-output lists first. */
static void add_bin_header(conn *c, uint16_t err, uint8_t hdr_len, uint16_t key_len, uint32_t body_len) {
    protocol_binary_response_header* header;

    assert(c);

    c->msgcurr = 0;
    c->msgused = 0;
    c->iovused = 0;
    if (add_msghdr(c) != 0) {
        /* XXX:  out_string is inappropriate here */
        out_string(c, "SERVER_ERROR out of memory");
        return;
    }

    header = (protocol_binary_response_header *)c->wbuf;

    header->response.magic = (uint8_t)PROTOCOL_BINARY_RES;
    header->response.opcode = c->binary_header.request.opcode;
    header->response.keylen = (uint16_t)htons(key_len);

    header->response.extlen = (uint8_t)hdr_len;
    header->response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES;
    header->response.status = (uint16_t)htons(err);

    header->response.bodylen = htonl(body_len);
    header->response.opaque = c->opaque;
    header->response.cas = htonll(c->cas);

    if (settings.verbose > 1) {
        int ii;
        fprintf(stderr, ">%d Writing bin response:", c->sfd);
        for (ii = 0; ii < sizeof(header->bytes); ++ii) {
            if (ii % 4 == 0) {
                fprintf(stderr, "\n>%d  ", c->sfd);
            }
            fprintf(stderr, " 0x%02x", header->bytes[ii]);
        }
        fprintf(stderr, "\n");
    }

    add_iov(c, c->wbuf, sizeof(header->response));
}

/* Map a binary-protocol status code to its canonical error text, queue the
 * error response, and optionally arrange to swallow the rest of the
 * offending request body. */
static void write_bin_error(conn *c, protocol_binary_response_status err, int swallow) {
    const char *errstr = "Unknown error";
    size_t len;

    switch (err) {
    case PROTOCOL_BINARY_RESPONSE_ENOMEM:
        errstr = "Out of memory";
        break;
    case PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND:
        errstr = "Unknown command";
        break;
    case PROTOCOL_BINARY_RESPONSE_KEY_ENOENT:
        errstr = "Not found";
        break;
    case PROTOCOL_BINARY_RESPONSE_EINVAL:
        errstr = "Invalid arguments";
        break;
    case PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS:
        errstr = "Data exists for key.";
        break;
    case PROTOCOL_BINARY_RESPONSE_E2BIG:
        errstr = "Too large.";
        break;
    case PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL:
        errstr = "Non-numeric server-side value for incr or decr";
        break;
    case PROTOCOL_BINARY_RESPONSE_NOT_STORED:
        errstr = "Not stored.";
        break;
    case PROTOCOL_BINARY_RESPONSE_AUTH_ERROR:
        errstr = "Auth failure.";
        break;
    default:
        assert(false);
        errstr = "UNHANDLED ERROR";
        fprintf(stderr, ">%d UNHANDLED ERROR: %d\n", c->sfd, err);
    }

    if (settings.verbose > 1) {
        fprintf(stderr, ">%d Writing an error: %s\n", c->sfd, errstr);
    }

    len = strlen(errstr);
    add_bin_header(c, err, 0, 0, len);
    if (len > 0) {
        add_iov(c, errstr, len);
    }
    conn_set_state(c, conn_mwrite);
    if(swallow > 0) {
        c->sbytes = swallow;
        c->write_and_go = conn_swallow;
    } else {
        c->write_and_go = conn_new_cmd;
    }
}

/* Form and send a response to a command over the binary protocol */
static void write_bin_response(conn *c, void *d, int hlen, int keylen, int dlen) {
    /* Quiet ("Q") commands suppress success replies, but GET/GETK always
     * answer since the client is waiting on the value. */
    if (!c->noreply || c->cmd == PROTOCOL_BINARY_CMD_GET ||
        c->cmd == PROTOCOL_BINARY_CMD_GETK) {
        add_bin_header(c, 0, hlen, keylen, dlen);
        if(dlen > 0) {
            add_iov(c, d, dlen);
        }
        conn_set_state(c, conn_mwrite);
        c->write_and_go = conn_new_cmd;
    } else {
        conn_set_state(c, conn_new_cmd);
    }
}

/* Handle a binary INCR/DECR once the full request has been read. */
static void complete_incr_bin(conn *c) {
    item *it;
    char *key;
    size_t nkey;

    protocol_binary_response_incr* rsp = (protocol_binary_response_incr*)c->wbuf;
protocol_binary_request_incr* req = binary_get_request(c);

    assert(c != NULL);
    assert(c->wsize >= sizeof(*rsp));

    /* fix byteorder in the request */
    req->message.body.delta = ntohll(req->message.body.delta);
    req->message.body.initial = ntohll(req->message.body.initial);
    req->message.body.expiration = ntohl(req->message.body.expiration);
    key = binary_get_key(c);
    nkey = c->binary_header.request.keylen;

    if (settings.verbose > 1) {
        int i;
        fprintf(stderr, "incr ");

        for (i = 0; i < nkey; i++) {
            fprintf(stderr, "%c", key[i]);
        }
        fprintf(stderr, " %lld, %llu, %d\n",
                (long long)req->message.body.delta,
                (long long)req->message.body.initial,
                req->message.body.expiration);
    }

    it = item_get(key, nkey);
    /* CAS of 0 means "don't care"; otherwise it must match the item. */
    if (it && (c->binary_header.request.cas == 0 ||
               c->binary_header.request.cas == ITEM_get_cas(it))) {
        /* Weird magic in add_delta forces me to pad here */
        char tmpbuf[INCR_MAX_STORAGE_LEN];
        protocol_binary_response_status st = PROTOCOL_BINARY_RESPONSE_SUCCESS;

        switch(add_delta(c, it, c->cmd == PROTOCOL_BINARY_CMD_INCREMENT,
                         req->message.body.delta, tmpbuf)) {
        case OK:
            break;
        case NON_NUMERIC:
            st = PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL;
            break;
        case EOM:
            st = PROTOCOL_BINARY_RESPONSE_ENOMEM;
            break;
        }

        if (st != PROTOCOL_BINARY_RESPONSE_SUCCESS) {
            write_bin_error(c, st, 0);
        } else {
            rsp->message.body.value = htonll(strtoull(tmpbuf, NULL, 10));
            c->cas = ITEM_get_cas(it);
            write_bin_response(c, &rsp->message.body, 0, 0,
                               sizeof(rsp->message.body.value));
        }

        item_remove(it);         /* release our reference */
    } else if (!it && req->message.body.expiration != 0xffffffff) {
        /* Miss with a non-0xffffffff expiration: binprot semantics say we
         * create the counter seeded with the requested initial value. */
        /* Save some room for the response */
        rsp->message.body.value = htonll(req->message.body.initial);
        it = item_alloc(key, nkey, 0, realtime(req->message.body.expiration),
                        INCR_MAX_STORAGE_LEN);

        if (it != NULL) {
            snprintf(ITEM_data(it), INCR_MAX_STORAGE_LEN, "%llu",
                     (unsigned long long)req->message.body.initial);

            if (store_item(it, NREAD_SET, c)) {
                c->cas = ITEM_get_cas(it);
                write_bin_response(c, &rsp->message.body, 0, 0,
                                   sizeof(rsp->message.body.value));
            } else {
                write_bin_error(c, PROTOCOL_BINARY_RESPONSE_NOT_STORED, 0);
            }
            item_remove(it);         /* release our reference */
        } else {
            write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
        }
    } else if (it) {
        /* incorrect CAS */
        item_remove(it);         /* release our reference */
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, 0);
    } else {
        pthread_mutex_lock(&c->thread->stats.mutex);
        if (c->cmd == PROTOCOL_BINARY_CMD_INCREMENT) {
            c->thread->stats.incr_misses++;
        } else {
            c->thread->stats.decr_misses++;
        }
        pthread_mutex_unlock(&c->thread->stats.mutex);

        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
    }
}

/* Finish a binary SET/ADD/REPLACE/APPEND/PREPEND after the value bytes
 * have been read into c->item. */
static void complete_update_bin(conn *c) {
    protocol_binary_response_status eno = PROTOCOL_BINARY_RESPONSE_EINVAL;
    enum store_item_type ret = NOT_STORED;
    assert(c != NULL);

    item *it = c->item;

    pthread_mutex_lock(&c->thread->stats.mutex);
    c->thread->stats.slab_stats[it->slabs_clsid].set_cmds++;
    pthread_mutex_unlock(&c->thread->stats.mutex);

    /* We don't actually receive the trailing two characters in the bin
     * protocol, so we're going to just set them here */
    *(ITEM_data(it) + it->nbytes - 2) = '\r';
    *(ITEM_data(it) + it->nbytes - 1) = '\n';

    ret = store_item(it, c->cmd, c);

#ifdef ENABLE_DTRACE
    uint64_t cas = ITEM_get_cas(it);
    switch (c->cmd) {
    case NREAD_ADD:
        MEMCACHED_COMMAND_ADD(c->sfd, ITEM_key(it), it->nkey,
                              (ret == STORED) ? it->nbytes : -1, cas);
        break;
    case NREAD_REPLACE:
        MEMCACHED_COMMAND_REPLACE(c->sfd, ITEM_key(it), it->nkey,
                                  (ret == STORED) ? it->nbytes : -1, cas);
        break;
    case NREAD_APPEND:
        MEMCACHED_COMMAND_APPEND(c->sfd, ITEM_key(it), it->nkey,
                                 (ret == STORED) ? it->nbytes : -1, cas);
        break;
    case NREAD_PREPEND:
        MEMCACHED_COMMAND_PREPEND(c->sfd, ITEM_key(it), it->nkey,
                                  (ret == STORED) ? it->nbytes : -1, cas);
        break;
    case NREAD_SET:
        MEMCACHED_COMMAND_SET(c->sfd, ITEM_key(it), it->nkey,
                              (ret == STORED) ? it->nbytes : -1, cas);
        break;
    }
#endif

    switch (ret) {
    case STORED:
        /* Stored */
        write_bin_response(c, NULL, 0, 0, 0);
        break;
    case EXISTS:
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, 0);
        break;
    case NOT_FOUND:
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
        break;
    case NOT_STORED:
        /* NOT_STORED maps to a command-specific error: ADD on an existing
         * key, REPLACE on a missing one. */
        if (c->cmd == NREAD_ADD) {
            eno = PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS;
        } else if(c->cmd == NREAD_REPLACE) {
            eno = PROTOCOL_BINARY_RESPONSE_KEY_ENOENT;
        } else {
            eno = PROTOCOL_BINARY_RESPONSE_NOT_STORED;
        }
        write_bin_error(c, eno, 0);
    }

    item_remove(c->item);       /* release the c->item reference */
    c->item = 0;
}

/* Handle a binary GET/GETK: look up the key and stream header + flags +
 * value (minus the stored CRLF) back to the client. */
static void process_bin_get(conn *c) {
    item *it;

    protocol_binary_response_get* rsp = (protocol_binary_response_get*)c->wbuf;
    char* key = binary_get_key(c);
    size_t nkey = c->binary_header.request.keylen;

    if (settings.verbose > 1) {
        int ii;
        fprintf(stderr, "<%d GET ", c->sfd);
        for (ii = 0; ii < nkey; ++ii) {
            fprintf(stderr, "%c", key[ii]);
        }
        fprintf(stderr, "\n");
    }

    it = item_get(key, nkey);
    if (it) {
        /* the length has two unnecessary bytes ("\r\n") */
        uint16_t keylen = 0;
        uint32_t bodylen = sizeof(rsp->message.body) + (it->nbytes - 2);

        pthread_mutex_lock(&c->thread->stats.mutex);
        c->thread->stats.get_cmds++;
        c->thread->stats.slab_stats[it->slabs_clsid].get_hits++;
        pthread_mutex_unlock(&c->thread->stats.mutex);

        MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey,
                              it->nbytes, ITEM_get_cas(it));

        if (c->cmd == PROTOCOL_BINARY_CMD_GETK) {
            bodylen += nkey;
            keylen = nkey;
        }
        add_bin_header(c, 0, sizeof(rsp->message.body), keylen, bodylen);
        rsp->message.header.response.cas = htonll(ITEM_get_cas(it));

        // add the flags
        rsp->message.body.flags = htonl(strtoul(ITEM_suffix(it), NULL, 10));
        add_iov(c, &rsp->message.body, sizeof(rsp->message.body));

        if (c->cmd == PROTOCOL_BINARY_CMD_GETK) {
            add_iov(c, ITEM_key(it), nkey);
        }

        /* Add the data minus the CRLF */
        add_iov(c, ITEM_data(it), it->nbytes - 2);
        conn_set_state(c, conn_mwrite);
        /* Remember this command so we can garbage
collect it later */
        c->item = it;
    } else {
        pthread_mutex_lock(&c->thread->stats.mutex);
        c->thread->stats.get_cmds++;
        c->thread->stats.get_misses++;
        pthread_mutex_unlock(&c->thread->stats.mutex);

        MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0);

        if (c->noreply) {
            conn_set_state(c, conn_new_cmd);
        } else {
            /* GETK misses echo the key back in the error body. */
            if (c->cmd == PROTOCOL_BINARY_CMD_GETK) {
                char *ofs = c->wbuf + sizeof(protocol_binary_response_header);
                add_bin_header(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT,
                               0, nkey, nkey);
                memcpy(ofs, key, nkey);
                add_iov(c, ofs, nkey);
                conn_set_state(c, conn_mwrite);
            } else {
                write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
            }
        }
    }

    if (settings.detail_enabled) {
        stats_prefix_record_get(key, nkey, NULL != it);
    }
}

/* Append one STAT key/value pair to c->stats.buffer in binary-protocol
 * framing.  Caller (append_stats) must have grown the buffer first. */
static void append_bin_stats(const char *key, const uint16_t klen,
                             const char *val, const uint32_t vlen,
                             conn *c) {
    char *buf = c->stats.buffer + c->stats.offset;
    uint32_t bodylen = klen + vlen;
    protocol_binary_response_header header = {
        .response.magic = (uint8_t)PROTOCOL_BINARY_RES,
        .response.opcode = PROTOCOL_BINARY_CMD_STAT,
        .response.keylen = (uint16_t)htons(klen),
        .response.datatype = (uint8_t)PROTOCOL_BINARY_RAW_BYTES,
        .response.bodylen = htonl(bodylen),
        .response.opaque = c->opaque
    };

    memcpy(buf, header.bytes, sizeof(header.response));
    buf += sizeof(header.response);

    if (klen > 0) {
        memcpy(buf, key, klen);
        buf += klen;

        if (vlen > 0) {
            memcpy(buf, val, vlen);
        }
    }

    c->stats.offset += sizeof(header.response) + bodylen;
}

/* Append one STAT line (or the terminating "END") in ASCII framing.
 * NOTE(review): snprintf's return value is the would-be length; if it ever
 * exceeded 'room' the offset would overrun the buffer.  append_stats()
 * grows the buffer by klen+vlen+10 beforehand, which covers the framing —
 * verify if the format strings change. */
static void append_ascii_stats(const char *key, const uint16_t klen,
                               const char *val, const uint32_t vlen,
                               conn *c) {
    char *pos = c->stats.buffer + c->stats.offset;
    uint32_t nbytes = 0;
    int remaining = c->stats.size - c->stats.offset;
    int room = remaining - 1;

    if (klen == 0 && vlen == 0) {
        nbytes = snprintf(pos, room, "END\r\n");
    } else if (vlen == 0) {
        nbytes = snprintf(pos, room, "STAT %s\r\n", key);
    } else {
        nbytes = snprintf(pos, room, "STAT %s %s\r\n", key, val);
    }

    c->stats.offset += nbytes;
}

/* Grow (by doubling) the connection's stats buffer until 'needed' more
 * bytes fit.  Returns false on allocation failure; the old buffer stays
 * valid in that case. */
static bool grow_stats_buf(conn *c, size_t needed) {
    size_t nsize = c->stats.size;
    size_t available = nsize - c->stats.offset;
    bool rv = true;

    /* Special case: No buffer -- need to allocate fresh */
    if (c->stats.buffer == NULL) {
        nsize = 1024;
        available = c->stats.size = c->stats.offset = 0;
    }

    while (needed > available) {
        assert(nsize > 0);
        nsize = nsize << 1;
        available = nsize - c->stats.offset;
    }

    if (nsize != c->stats.size) {
        char *ptr = realloc(c->stats.buffer, nsize);
        if (ptr) {
            c->stats.buffer = ptr;
            c->stats.size = nsize;
        } else {
            rv = false;
        }
    }

    return rv;
}

/* ADD_STAT callback: buffer one stat pair in whichever protocol framing
 * the connection speaks; silently drops the pair on OOM. */
static void append_stats(const char *key, const uint16_t klen,
                  const char *val, const uint32_t vlen,
                  const void *cookie)
{
    /* value without a key is invalid */
    if (klen == 0 && vlen > 0) {
        return ;
    }

    conn *c = (conn*)cookie;

    if (c->protocol == binary_prot) {
        size_t needed = vlen + klen + sizeof(protocol_binary_response_header);
        if (!grow_stats_buf(c, needed)) {
            return ;
        }
        append_bin_stats(key, klen, val, vlen, c);
    } else {
        size_t needed = vlen + klen + 10; // 10 == "STAT = \r\n"
        if (!grow_stats_buf(c, needed)) {
            return ;
        }
        append_ascii_stats(key, klen, val, vlen, c);
    }

    assert(c->stats.offset <= c->stats.size);
}

/* Dispatch the binary STAT command and its subcommands (reset, settings,
 * detail on/off/dump, or an arbitrary engine-specific stat key). */
static void process_bin_stat(conn *c) {
    char *subcommand = binary_get_key(c);
    size_t nkey = c->binary_header.request.keylen;

    if (settings.verbose > 1) {
        int ii;
        fprintf(stderr, "<%d STATS ", c->sfd);
        for (ii = 0; ii < nkey; ++ii) {
            fprintf(stderr, "%c", subcommand[ii]);
        }
        fprintf(stderr, "\n");
    }

    if (nkey == 0) {
        /* request all statistics */
        server_stats(&append_stats, c);
        (void)get_stats(NULL, 0, &append_stats, c);
    } else if (strncmp(subcommand, "reset", 5) == 0) {
        stats_reset();
    } else if (strncmp(subcommand, "settings", 8) == 0) {
        process_stat_settings(&append_stats, c);
    } else if (strncmp(subcommand, "detail", 6) == 0) {
        char *subcmd_pos = subcommand + 6;
        if (strncmp(subcmd_pos, " dump", 5) == 0) {
            int len;
            char *dump_buf = stats_prefix_dump(&len);
            if (dump_buf == NULL || len <= 0) {
                write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
                return ;
            } else {
                append_stats("detailed", strlen("detailed"), dump_buf, len, c);
                free(dump_buf);
            }
        } else if (strncmp(subcmd_pos, " on", 3) == 0) {
            settings.detail_enabled = 1;
        } else if (strncmp(subcmd_pos, " off", 4) == 0) {
            settings.detail_enabled = 0;
        } else {
            write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
            return;
        }
    } else {
        /* Unknown subcommand: hand it to the engine-specific stats hook. */
        if (get_stats(subcommand, nkey, &append_stats, c)) {
            if (c->stats.buffer == NULL) {
                write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
            } else {
                write_and_free(c, c->stats.buffer, c->stats.offset);
                c->stats.buffer = NULL;
            }
        } else {
            write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
        }

        return;
    }

    /* Append termination package and start the transfer */
    append_stats(NULL, 0, NULL, 0, c);
    if (c->stats.buffer == NULL) {
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, 0);
    } else {
        write_and_free(c, c->stats.buffer, c->stats.offset);
        c->stats.buffer = NULL;
    }
}

/* Arrange to read 'extra' bytes plus the key into the input buffer,
 * growing (doubling) and repacking the buffer if needed, then move to
 * conn_nread with c->ritem just past the preserved request header. */
static void bin_read_key(conn *c, enum bin_substates next_substate, int extra) {
    assert(c);
    c->substate = next_substate;
    c->rlbytes = c->keylen + extra;

    /* Ok... do we have room for the extras and the key in the input buffer? */
    ptrdiff_t offset = c->rcurr + sizeof(protocol_binary_request_header) - c->rbuf;
    if (c->rlbytes > c->rsize - offset) {
        size_t nsize = c->rsize;
        size_t size = c->rlbytes + sizeof(protocol_binary_request_header);

        while (size > nsize) {
            nsize *= 2;
        }

        if (nsize != c->rsize) {
            if (settings.verbose > 1) {
                fprintf(stderr, "%d: Need to grow buffer from %lu to %lu\n",
                        c->sfd, (unsigned long)c->rsize, (unsigned long)nsize);
            }
            char *newm = realloc(c->rbuf, nsize);
            if (newm == NULL) {
                if (settings.verbose) {
                    fprintf(stderr, "%d: Failed to grow buffer.. 
closing connection\n", c->sfd);
            }
            conn_set_state(c, conn_closing);
            return;
        }
        c->rbuf = newm;
        /* rcurr should point to the same offset in the packet */
        c->rcurr = c->rbuf + offset - sizeof(protocol_binary_request_header);
        c->rsize = nsize;
    }
    if (c->rbuf != c->rcurr) {
        /* Slide the pending bytes to the front of the buffer so the value
         * that follows the header can be read contiguously. */
        memmove(c->rbuf, c->rcurr, c->rbytes);
        c->rcurr = c->rbuf;
        if (settings.verbose > 1) {
            fprintf(stderr, "%d: Repack input buffer\n", c->sfd);
        }
    }
    /* preserve the header in the buffer.. */
    c->ritem = c->rcurr + sizeof(protocol_binary_request_header);
    conn_set_state(c, conn_nread);
}

/* Just write an error message and disconnect the client */
static void handle_binary_protocol_error(conn *c) {
    write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, 0);
    if (settings.verbose) {
        fprintf(stderr, "Protocol error (opcode %02x), close connection %d\n",
                c->binary_header.request.opcode, c->sfd);
    }
    /* Drain any outgoing response, then drop the connection. */
    c->write_and_go = conn_closing;
}

/* Lazily create the per-connection SASL server context.  On failure the
 * connection keeps a NULL sasl_conn and later SASL calls will fail. */
static void init_sasl_conn(conn *c) {
    assert(c);
    /* should something else be returned? */
    if (!settings.sasl)
        return;
    if (!c->sasl_conn) {
        int result=sasl_server_new("memcached",
                                   NULL, NULL, NULL, NULL,
                                   NULL, 0, &c->sasl_conn);
        if (result != SASL_OK) {
            if (settings.verbose) {
                fprintf(stderr, "Failed to initialize SASL conn.\n");
            }
            c->sasl_conn = NULL;
        }
    }
}

/* Respond to SASL_LIST_MECHS with the space-separated mechanism list. */
static void bin_list_sasl_mechs(conn *c) {
    // Guard against a disabled SASL.
    if (!settings.sasl) {
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND,
                        c->binary_header.request.bodylen
                        - c->binary_header.request.keylen);
        return;
    }

    init_sasl_conn(c);
    const char *result_string = NULL;
    unsigned int string_length = 0;
    int result=sasl_listmech(c->sasl_conn, NULL,
                             "",   /* What to prepend the string with */
                             " ",  /* What to separate mechanisms with */
                             "",   /* What to append to the string */
                             &result_string, &string_length, NULL);
    if (result != SASL_OK) {
        /* Perhaps there's a better error for this... */
        if (settings.verbose) {
            fprintf(stderr, "Failed to list SASL mechanisms.\n");
        }
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
        return;
    }
    write_bin_response(c, (char*)result_string, 0, 0, string_length);
}

/* First half of SASL_AUTH/SASL_STEP: allocate a temporary item to receive
 * the authentication payload, then continue in
 * process_bin_complete_sasl_auth() once the body has been read. */
static void process_bin_sasl_auth(conn *c) {
    // Guard for handling disabled SASL on the server.
    if (!settings.sasl) {
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND,
                        c->binary_header.request.bodylen
                        - c->binary_header.request.keylen);
        return;
    }

    assert(c->binary_header.request.extlen == 0);

    int nkey = c->binary_header.request.keylen;
    /* NOTE(review): vlen goes negative if a crafted packet declares
     * bodylen < keylen — nothing on this path validates that, and the
     * negative value is handed to item_alloc() below.  Confirm that the
     * dispatcher rejects such packets before this is reached. */
    int vlen = c->binary_header.request.bodylen - nkey;

    if (nkey > MAX_SASL_MECH_LEN) {
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_EINVAL, vlen);
        c->write_and_go = conn_swallow;
        return;
    }

    char *key = binary_get_key(c);
    assert(key);

    item *it = item_alloc(key, nkey, 0, 0, vlen);

    if (it == 0) {
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, vlen);
        c->write_and_go = conn_swallow;
        return;
    }

    c->item = it;
    c->ritem = ITEM_data(it);
    c->rlbytes = vlen;
    conn_set_state(c, conn_nread);
    c->substate = bin_reading_sasl_auth_data;
}

/* Second half of SASL_AUTH/SASL_STEP: the mechanism name is the item key,
 * the challenge/response data is the item value.  Runs the appropriate
 * libsasl entry point and reports OK / CONTINUE / AUTH_ERROR. */
static void process_bin_complete_sasl_auth(conn *c) {
    assert(settings.sasl);
    const char *out = NULL;
    unsigned int outlen = 0;

    assert(c->item);
    init_sasl_conn(c);

    int nkey = c->binary_header.request.keylen;
    int vlen = c->binary_header.request.bodylen - nkey;

    /* VLA holds the NUL-terminated mechanism name copied from the item key. */
    char mech[nkey+1];
    memcpy(mech, ITEM_key((item*)c->item), nkey);
    mech[nkey] = 0x00;

    if (settings.verbose)
        fprintf(stderr, "mech: ``%s'' with %d bytes of data\n", mech, vlen);

    const char *challenge = vlen == 0 ? NULL : ITEM_data((item*) c->item);

    int result=-1;

    switch (c->cmd) {
    case PROTOCOL_BINARY_CMD_SASL_AUTH:
        result = sasl_server_start(c->sasl_conn, mech,
                                   challenge, vlen,
                                   &out, &outlen);
        break;
    case PROTOCOL_BINARY_CMD_SASL_STEP:
        result = sasl_server_step(c->sasl_conn,
                                  challenge, vlen,
                                  &out, &outlen);
        break;
    default:
        assert(false); /* CMD should be one of the above */
        /* This code is pretty much impossible, but makes the compiler
           happier */
        if (settings.verbose) {
            fprintf(stderr, "Unhandled command %d with challenge %s\n",
                    c->cmd, challenge);
        }
        break;
    }

    /* The temporary item was only a receive buffer; unlink it now.  The
     * reference itself is released later by reset_cmd_handler(). */
    item_unlink(c->item);

    if (settings.verbose) {
        fprintf(stderr, "sasl result code: %d\n", result);
    }

    switch(result) {
    case SASL_OK:
        write_bin_response(c, "Authenticated", 0, 0, strlen("Authenticated"));
        break;
    case SASL_CONTINUE:
        /* Multi-step mechanism: hand the server challenge back to the
         * client and await a SASL_STEP. */
        add_bin_header(c, PROTOCOL_BINARY_RESPONSE_AUTH_CONTINUE, 0, 0, outlen);
        if(outlen > 0) {
            add_iov(c, out, outlen);
        }
        conn_set_state(c, conn_mwrite);
        c->write_and_go = conn_new_cmd;
        break;
    default:
        if (settings.verbose)
            fprintf(stderr, "Unknown sasl response: %d\n", result);
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0);
    }
}

/* Returns true if this command may proceed: either it is one of the
 * always-allowed opcodes (the SASL handshake itself plus VERSION), or
 * libsasl reports an authenticated username on the connection. */
static bool authenticated(conn *c) {
    assert(settings.sasl);
    bool rv = false;

    switch (c->cmd) {
    case PROTOCOL_BINARY_CMD_SASL_LIST_MECHS: /* FALLTHROUGH */
    case PROTOCOL_BINARY_CMD_SASL_AUTH:       /* FALLTHROUGH */
    case PROTOCOL_BINARY_CMD_SASL_STEP:       /* FALLTHROUGH */
    case PROTOCOL_BINARY_CMD_VERSION:         /* FALLTHROUGH */
        rv = true;
        break;
    default:
        if (c->sasl_conn) {
            const void *uname = NULL;
            sasl_getprop(c->sasl_conn, SASL_USERNAME, &uname);
            rv = uname != NULL;
        }
    }

    if (settings.verbose > 1) {
        fprintf(stderr, "authenticated() in cmd 0x%02x is %s\n",
                c->cmd, rv ?
"true" : "false"); } return rv; } static void dispatch_bin_command(conn *c) { int protocol_error = 0; int extlen = c->binary_header.request.extlen; int keylen = c->binary_header.request.keylen; uint32_t bodylen = c->binary_header.request.bodylen; if (settings.sasl && !authenticated(c)) { write_bin_error(c, PROTOCOL_BINARY_RESPONSE_AUTH_ERROR, 0); c->write_and_go = conn_closing; return; } MEMCACHED_PROCESS_COMMAND_START(c->sfd, c->rcurr, c->rbytes); c->noreply = true; /* binprot supports 16bit keys, but internals are still 8bit */ if (keylen > KEY_MAX_LENGTH) { handle_binary_protocol_error(c); return; } switch (c->cmd) { case PROTOCOL_BINARY_CMD_SETQ: c->cmd = PROTOCOL_BINARY_CMD_SET; break; case PROTOCOL_BINARY_CMD_ADDQ: c->cmd = PROTOCOL_BINARY_CMD_ADD; break; case PROTOCOL_BINARY_CMD_REPLACEQ: c->cmd = PROTOCOL_BINARY_CMD_REPLACE; break; case PROTOCOL_BINARY_CMD_DELETEQ: c->cmd = PROTOCOL_BINARY_CMD_DELETE; break; case PROTOCOL_BINARY_CMD_INCREMENTQ: c->cmd = PROTOCOL_BINARY_CMD_INCREMENT; break; case PROTOCOL_BINARY_CMD_DECREMENTQ: c->cmd = PROTOCOL_BINARY_CMD_DECREMENT; break; case PROTOCOL_BINARY_CMD_QUITQ: c->cmd = PROTOCOL_BINARY_CMD_QUIT; break; case PROTOCOL_BINARY_CMD_FLUSHQ: c->cmd = PROTOCOL_BINARY_CMD_FLUSH; break; case PROTOCOL_BINARY_CMD_APPENDQ: c->cmd = PROTOCOL_BINARY_CMD_APPEND; break; case PROTOCOL_BINARY_CMD_PREPENDQ: c->cmd = PROTOCOL_BINARY_CMD_PREPEND; break; case PROTOCOL_BINARY_CMD_GETQ: c->cmd = PROTOCOL_BINARY_CMD_GET; break; case PROTOCOL_BINARY_CMD_GETKQ: c->cmd = PROTOCOL_BINARY_CMD_GETK; break; default: c->noreply = false; } switch (c->cmd) { case PROTOCOL_BINARY_CMD_VERSION: if (extlen == 0 && keylen == 0 && bodylen == 0) { write_bin_response(c, VERSION, 0, 0, strlen(VERSION)); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_FLUSH: if (keylen == 0 && bodylen == extlen && (extlen == 0 || extlen == 4)) { bin_read_key(c, bin_read_flush_exptime, extlen); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_NOOP: 
if (extlen == 0 && keylen == 0 && bodylen == 0) { write_bin_response(c, NULL, 0, 0, 0); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_SET: /* FALLTHROUGH */ case PROTOCOL_BINARY_CMD_ADD: /* FALLTHROUGH */ case PROTOCOL_BINARY_CMD_REPLACE: if (extlen == 8 && keylen != 0 && bodylen >= (keylen + 8)) { bin_read_key(c, bin_reading_set_header, 8); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_GETQ: /* FALLTHROUGH */ case PROTOCOL_BINARY_CMD_GET: /* FALLTHROUGH */ case PROTOCOL_BINARY_CMD_GETKQ: /* FALLTHROUGH */ case PROTOCOL_BINARY_CMD_GETK: if (extlen == 0 && bodylen == keylen && keylen > 0) { bin_read_key(c, bin_reading_get_key, 0); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_DELETE: if (keylen > 0 && extlen == 0 && bodylen == keylen) { bin_read_key(c, bin_reading_del_header, extlen); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_INCREMENT: case PROTOCOL_BINARY_CMD_DECREMENT: if (keylen > 0 && extlen == 20 && bodylen == (keylen + extlen)) { bin_read_key(c, bin_reading_incr_header, 20); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_APPEND: case PROTOCOL_BINARY_CMD_PREPEND: if (keylen > 0 && extlen == 0) { bin_read_key(c, bin_reading_set_header, 0); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_STAT: if (extlen == 0) { bin_read_key(c, bin_reading_stat, 0); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_QUIT: if (keylen == 0 && extlen == 0 && bodylen == 0) { write_bin_response(c, NULL, 0, 0, 0); c->write_and_go = conn_closing; if (c->noreply) { conn_set_state(c, conn_closing); } } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_SASL_LIST_MECHS: if (extlen == 0 && keylen == 0 && bodylen == 0) { bin_list_sasl_mechs(c); } else { protocol_error = 1; } break; case PROTOCOL_BINARY_CMD_SASL_AUTH: case PROTOCOL_BINARY_CMD_SASL_STEP: if (extlen == 0 && keylen != 0) { bin_read_key(c, bin_reading_sasl_auth, 0); } else { protocol_error = 1; } break; 
default: write_bin_error(c, PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND, bodylen); } if (protocol_error) handle_binary_protocol_error(c); } static void process_bin_update(conn *c) { char *key; int nkey; int vlen; item *it; protocol_binary_request_set* req = binary_get_request(c); assert(c != NULL); key = binary_get_key(c); nkey = c->binary_header.request.keylen; /* fix byteorder in the request */ req->message.body.flags = ntohl(req->message.body.flags); req->message.body.expiration = ntohl(req->message.body.expiration); vlen = c->binary_header.request.bodylen - (nkey + c->binary_header.request.extlen); if (settings.verbose > 1) { int ii; if (c->cmd == PROTOCOL_BINARY_CMD_ADD) { fprintf(stderr, "<%d ADD ", c->sfd); } else if (c->cmd == PROTOCOL_BINARY_CMD_SET) { fprintf(stderr, "<%d SET ", c->sfd); } else { fprintf(stderr, "<%d REPLACE ", c->sfd); } for (ii = 0; ii < nkey; ++ii) { fprintf(stderr, "%c", key[ii]); } fprintf(stderr, " Value len is %d", vlen); fprintf(stderr, "\n"); } if (settings.detail_enabled) { stats_prefix_record_set(key, nkey); } it = item_alloc(key, nkey, req->message.body.flags, realtime(req->message.body.expiration), vlen+2); if (it == 0) { if (! item_size_ok(nkey, req->message.body.flags, vlen + 2)) { write_bin_error(c, PROTOCOL_BINARY_RESPONSE_E2BIG, vlen); } else { write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, vlen); } /* Avoid stale data persisting in cache because we failed alloc. * Unacceptable for SET. Anywhere else too? 
*/
        if (c->cmd == PROTOCOL_BINARY_CMD_SET) {
            /* Evict any existing value so a failed SET cannot leave stale
             * data behind. */
            it = item_get(key, nkey);
            if (it) {
                item_unlink(it);
                item_remove(it);
            }
        }

        /* swallow the data line */
        c->write_and_go = conn_swallow;
        return;
    }

    ITEM_set_cas(it, c->binary_header.request.cas);

    /* Translate the wire opcode into the internal store-comm code. */
    switch (c->cmd) {
    case PROTOCOL_BINARY_CMD_ADD:
        c->cmd = NREAD_ADD;
        break;
    case PROTOCOL_BINARY_CMD_SET:
        c->cmd = NREAD_SET;
        break;
    case PROTOCOL_BINARY_CMD_REPLACE:
        c->cmd = NREAD_REPLACE;
        break;
    default:
        assert(0);
    }

    /* A nonzero request CAS turns any of the above into a CAS store. */
    if (ITEM_get_cas(it) != 0) {
        c->cmd = NREAD_CAS;
    }

    c->item = it;
    c->ritem = ITEM_data(it);
    c->rlbytes = vlen;
    conn_set_state(c, conn_nread);
    c->substate = bin_read_set_value;
}

/* Binary APPEND/PREPEND: allocate an item to receive the value to splice,
 * then continue in complete_update_bin() via conn_nread. */
static void process_bin_append_prepend(conn *c) {
    char *key;
    int nkey;
    int vlen;
    item *it;

    assert(c != NULL);

    key = binary_get_key(c);
    nkey = c->binary_header.request.keylen;
    /* NOTE(review): nothing here checks bodylen >= keylen, so vlen can be
     * negative on a crafted packet and is fed to item_alloc() below —
     * verify the dispatcher rejects bodylen < keylen before this point. */
    vlen = c->binary_header.request.bodylen - nkey;

    if (settings.verbose > 1) {
        fprintf(stderr, "Value len is %d\n", vlen);
    }

    if (settings.detail_enabled) {
        stats_prefix_record_set(key, nkey);
    }

    it = item_alloc(key, nkey, 0, 0, vlen+2);

    if (it == 0) {
        if (! item_size_ok(nkey, 0, vlen + 2)) {
            write_bin_error(c, PROTOCOL_BINARY_RESPONSE_E2BIG, vlen);
        } else {
            write_bin_error(c, PROTOCOL_BINARY_RESPONSE_ENOMEM, vlen);
        }
        /* swallow the data line */
        c->write_and_go = conn_swallow;
        return;
    }

    ITEM_set_cas(it, c->binary_header.request.cas);

    switch (c->cmd) {
    case PROTOCOL_BINARY_CMD_APPEND:
        c->cmd = NREAD_APPEND;
        break;
    case PROTOCOL_BINARY_CMD_PREPEND:
        c->cmd = NREAD_PREPEND;
        break;
    default:
        assert(0);
    }

    c->item = it;
    c->ritem = ITEM_data(it);
    c->rlbytes = vlen;
    conn_set_state(c, conn_nread);
    c->substate = bin_read_set_value;
}

/* Binary FLUSH: optional 4-byte exptime in the extras; expires everything
 * older than the computed cutoff. */
static void process_bin_flush(conn *c) {
    time_t exptime = 0;
    protocol_binary_request_flush* req = binary_get_request(c);

    if (c->binary_header.request.extlen == sizeof(req->message.body)) {
        exptime = ntohl(req->message.body.expiration);
    }

    set_current_time();

    if (exptime > 0) {
        settings.oldest_live = realtime(exptime) - 1;
    } else {
        settings.oldest_live = current_time - 1;
    }
    item_flush_expired();

    pthread_mutex_lock(&c->thread->stats.mutex);
    c->thread->stats.flush_cmds++;
    pthread_mutex_unlock(&c->thread->stats.mutex);

    write_bin_response(c, NULL, 0, 0, 0);
}

/* Binary DELETE: honors a nonzero request CAS (KEY_EEXISTS on mismatch). */
static void process_bin_delete(conn *c) {
    item *it;

    protocol_binary_request_delete* req = binary_get_request(c);

    char* key = binary_get_key(c);
    size_t nkey = c->binary_header.request.keylen;

    assert(c != NULL);

    if (settings.verbose > 1) {
        fprintf(stderr, "Deleting %s\n", key);
    }

    if (settings.detail_enabled) {
        stats_prefix_record_delete(key, nkey);
    }

    it = item_get(key, nkey);
    if (it) {
        uint64_t cas = ntohll(req->message.header.request.cas);
        if (cas == 0 || cas == ITEM_get_cas(it)) {
            MEMCACHED_COMMAND_DELETE(c->sfd, ITEM_key(it), it->nkey);
            item_unlink(it);
            write_bin_response(c, NULL, 0, 0, 0);
        } else {
            write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS, 0);
        }
        item_remove(it);      /* release our reference */
    } else {
        write_bin_error(c, PROTOCOL_BINARY_RESPONSE_KEY_ENOENT, 0);
    }
}

/* Dispatch on substate once the pending binary read has completed. */
static void complete_nread_binary(conn *c) {
    assert(c != NULL);
assert(c->cmd >= 0); switch(c->substate) { case bin_reading_set_header: if (c->cmd == PROTOCOL_BINARY_CMD_APPEND || c->cmd == PROTOCOL_BINARY_CMD_PREPEND) { process_bin_append_prepend(c); } else { process_bin_update(c); } break; case bin_read_set_value: complete_update_bin(c); break; case bin_reading_get_key: process_bin_get(c); break; case bin_reading_stat: process_bin_stat(c); break; case bin_reading_del_header: process_bin_delete(c); break; case bin_reading_incr_header: complete_incr_bin(c); break; case bin_read_flush_exptime: process_bin_flush(c); break; case bin_reading_sasl_auth: process_bin_sasl_auth(c); break; case bin_reading_sasl_auth_data: process_bin_complete_sasl_auth(c); break; default: fprintf(stderr, "Not handling substate %d\n", c->substate); assert(0); } } static void reset_cmd_handler(conn *c) { c->cmd = -1; c->substate = bin_no_state; if(c->item != NULL) { item_remove(c->item); c->item = NULL; } conn_shrink(c); if (c->rbytes > 0) { conn_set_state(c, conn_parse_cmd); } else { conn_set_state(c, conn_waiting); } } static void complete_nread(conn *c) { assert(c != NULL); assert(c->protocol == ascii_prot || c->protocol == binary_prot); if (c->protocol == ascii_prot) { complete_nread_ascii(c); } else if (c->protocol == binary_prot) { complete_nread_binary(c); } } /* * Stores an item in the cache according to the semantics of one of the set * commands. In threaded mode, this is protected by the cache lock. * * Returns the state of storage. 
*/ enum store_item_type do_store_item(item *it, int comm, conn *c) { char *key = ITEM_key(it); item *old_it = do_item_get(key, it->nkey); enum store_item_type stored = NOT_STORED; item *new_it = NULL; int flags; if (old_it != NULL && comm == NREAD_ADD) { /* add only adds a nonexistent item, but promote to head of LRU */ do_item_update(old_it); } else if (!old_it && (comm == NREAD_REPLACE || comm == NREAD_APPEND || comm == NREAD_PREPEND)) { /* replace only replaces an existing value; don't store */ } else if (comm == NREAD_CAS) { /* validate cas operation */ if(old_it == NULL) { // LRU expired stored = NOT_FOUND; pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.cas_misses++; pthread_mutex_unlock(&c->thread->stats.mutex); } else if (ITEM_get_cas(it) == ITEM_get_cas(old_it)) { // cas validates // it and old_it may belong to different classes. // I'm updating the stats for the one that's getting pushed out pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.slab_stats[old_it->slabs_clsid].cas_hits++; pthread_mutex_unlock(&c->thread->stats.mutex); item_replace(old_it, it); stored = STORED; } else { pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.slab_stats[old_it->slabs_clsid].cas_badval++; pthread_mutex_unlock(&c->thread->stats.mutex); if(settings.verbose > 1) { fprintf(stderr, "CAS: failure: expected %llu, got %llu\n", (unsigned long long)ITEM_get_cas(old_it), (unsigned long long)ITEM_get_cas(it)); } stored = EXISTS; } } else { /* * Append - combine new and old record into single one. Here it's * atomic and thread-safe. 
*/ if (comm == NREAD_APPEND || comm == NREAD_PREPEND) { /* * Validate CAS */ if (ITEM_get_cas(it) != 0) { // CAS much be equal if (ITEM_get_cas(it) != ITEM_get_cas(old_it)) { stored = EXISTS; } } if (stored == NOT_STORED) { /* we have it and old_it here - alloc memory to hold both */ /* flags was already lost - so recover them from ITEM_suffix(it) */ flags = (int) strtol(ITEM_suffix(old_it), (char **) NULL, 10); new_it = do_item_alloc(key, it->nkey, flags, old_it->exptime, it->nbytes + old_it->nbytes - 2 /* CRLF */); if (new_it == NULL) { /* SERVER_ERROR out of memory */ if (old_it != NULL) do_item_remove(old_it); return NOT_STORED; } /* copy data from it and old_it to new_it */ if (comm == NREAD_APPEND) { memcpy(ITEM_data(new_it), ITEM_data(old_it), old_it->nbytes); memcpy(ITEM_data(new_it) + old_it->nbytes - 2 /* CRLF */, ITEM_data(it), it->nbytes); } else { /* NREAD_PREPEND */ memcpy(ITEM_data(new_it), ITEM_data(it), it->nbytes); memcpy(ITEM_data(new_it) + it->nbytes - 2 /* CRLF */, ITEM_data(old_it), old_it->nbytes); } it = new_it; } } if (stored == NOT_STORED) { if (old_it != NULL) item_replace(old_it, it); else do_item_link(it); c->cas = ITEM_get_cas(it); stored = STORED; } } if (old_it != NULL) do_item_remove(old_it); /* release our reference */ if (new_it != NULL) do_item_remove(new_it); if (stored == STORED) { c->cas = ITEM_get_cas(it); } return stored; } typedef struct token_s { char *value; size_t length; } token_t; #define COMMAND_TOKEN 0 #define SUBCOMMAND_TOKEN 1 #define KEY_TOKEN 1 #define MAX_TOKENS 8 /* * Tokenize the command string by replacing whitespace with '\0' and update * the token array tokens with pointer to start of each token and length. * Returns total number of tokens. The last valid token is the terminal * token (value points to the first unprocessed character of the string and * length zero). 
* * Usage example: * * while(tokenize_command(command, ncommand, tokens, max_tokens) > 0) { * for(int ix = 0; tokens[ix].length != 0; ix++) { * ... * } * ncommand = tokens[ix].value - command; * command = tokens[ix].value; * } */ static size_t tokenize_command(char *command, token_t *tokens, const size_t max_tokens) { char *s, *e; size_t ntokens = 0; assert(command != NULL && tokens != NULL && max_tokens > 1); for (s = e = command; ntokens < max_tokens - 1; ++e) { if (*e == ' ') { if (s != e) { tokens[ntokens].value = s; tokens[ntokens].length = e - s; ntokens++; *e = '\0'; } s = e + 1; } else if (*e == '\0') { if (s != e) { tokens[ntokens].value = s; tokens[ntokens].length = e - s; ntokens++; } break; /* string end */ } } /* * If we scanned the whole string, the terminal value pointer is null, * otherwise it is the first unprocessed character. */ tokens[ntokens].value = *e == '\0' ? NULL : e; tokens[ntokens].length = 0; ntokens++; return ntokens; } /* set up a connection to write a buffer then free it, used for stats */ static void write_and_free(conn *c, char *buf, int bytes) { if (buf) { c->write_and_free = buf; c->wcurr = buf; c->wbytes = bytes; conn_set_state(c, conn_write); c->write_and_go = conn_new_cmd; } else { out_string(c, "SERVER_ERROR out of memory writing stats"); } } static inline void set_noreply_maybe(conn *c, token_t *tokens, size_t ntokens) { int noreply_index = ntokens - 2; /* NOTE: this function is not the first place where we are going to send the reply. We could send it instead from process_command() if the request line has wrong number of tokens. However parsing malformed line for "noreply" option is not reliable anyway, so it can't be helped. */ if (tokens[noreply_index].value && strcmp(tokens[noreply_index].value, "noreply") == 0) { c->noreply = true; } } void append_stat(const char *name, ADD_STAT add_stats, conn *c, const char *fmt, ...) 
{ char val_str[STAT_VAL_LEN]; int vlen; va_list ap; assert(name); assert(add_stats); assert(c); assert(fmt); va_start(ap, fmt); vlen = vsnprintf(val_str, sizeof(val_str) - 1, fmt, ap); va_end(ap); add_stats(name, strlen(name), val_str, vlen, c); } inline static void process_stats_detail(conn *c, const char *command) { assert(c != NULL); if (strcmp(command, "on") == 0) { settings.detail_enabled = 1; out_string(c, "OK"); } else if (strcmp(command, "off") == 0) { settings.detail_enabled = 0; out_string(c, "OK"); } else if (strcmp(command, "dump") == 0) { int len; char *stats = stats_prefix_dump(&len); write_and_free(c, stats, len); } else { out_string(c, "CLIENT_ERROR usage: stats detail on|off|dump"); } } /* return server specific stats only */ static void server_stats(ADD_STAT add_stats, conn *c) { pid_t pid = getpid(); rel_time_t now = current_time; struct thread_stats thread_stats; threadlocal_stats_aggregate(&thread_stats); struct slab_stats slab_stats; slab_stats_aggregate(&thread_stats, &slab_stats); #ifndef WIN32 struct rusage usage; getrusage(RUSAGE_SELF, &usage); #endif /* !WIN32 */ STATS_LOCK(); APPEND_STAT("pid", "%lu", (long)pid); APPEND_STAT("uptime", "%u", now); APPEND_STAT("time", "%ld", now + (long)process_started); APPEND_STAT("version", "%s", VERSION); APPEND_STAT("pointer_size", "%d", (int)(8 * sizeof(void *))); #ifndef WIN32 append_stat("rusage_user", add_stats, c, "%ld.%06ld", (long)usage.ru_utime.tv_sec, (long)usage.ru_utime.tv_usec); append_stat("rusage_system", add_stats, c, "%ld.%06ld", (long)usage.ru_stime.tv_sec, (long)usage.ru_stime.tv_usec); #endif /* !WIN32 */ APPEND_STAT("curr_connections", "%u", stats.curr_conns - 1); APPEND_STAT("total_connections", "%u", stats.total_conns); APPEND_STAT("connection_structures", "%u", stats.conn_structs); APPEND_STAT("cmd_get", "%llu", (unsigned long long)thread_stats.get_cmds); APPEND_STAT("cmd_set", "%llu", (unsigned long long)slab_stats.set_cmds); APPEND_STAT("cmd_flush", "%llu", (unsigned long 
long)thread_stats.flush_cmds); APPEND_STAT("get_hits", "%llu", (unsigned long long)slab_stats.get_hits); APPEND_STAT("get_misses", "%llu", (unsigned long long)thread_stats.get_misses); APPEND_STAT("delete_misses", "%llu", (unsigned long long)thread_stats.delete_misses); APPEND_STAT("delete_hits", "%llu", (unsigned long long)slab_stats.delete_hits); APPEND_STAT("incr_misses", "%llu", (unsigned long long)thread_stats.incr_misses); APPEND_STAT("incr_hits", "%llu", (unsigned long long)slab_stats.incr_hits); APPEND_STAT("decr_misses", "%llu", (unsigned long long)thread_stats.decr_misses); APPEND_STAT("decr_hits", "%llu", (unsigned long long)slab_stats.decr_hits); APPEND_STAT("cas_misses", "%llu", (unsigned long long)thread_stats.cas_misses); APPEND_STAT("cas_hits", "%llu", (unsigned long long)slab_stats.cas_hits); APPEND_STAT("cas_badval", "%llu", (unsigned long long)slab_stats.cas_badval); APPEND_STAT("bytes_read", "%llu", (unsigned long long)thread_stats.bytes_read); APPEND_STAT("bytes_written", "%llu", (unsigned long long)thread_stats.bytes_written); APPEND_STAT("limit_maxbytes", "%llu", (unsigned long long)settings.maxbytes); APPEND_STAT("accepting_conns", "%u", stats.accepting_conns); APPEND_STAT("listen_disabled_num", "%llu", (unsigned long long)stats.listen_disabled_num); APPEND_STAT("threads", "%d", settings.num_threads); APPEND_STAT("conn_yields", "%llu", (unsigned long long)thread_stats.conn_yields); STATS_UNLOCK(); } static void process_stat_settings(ADD_STAT add_stats, void *c) { assert(add_stats); APPEND_STAT("maxbytes", "%u", (unsigned int)settings.maxbytes); APPEND_STAT("maxconns", "%d", settings.maxconns); APPEND_STAT("tcpport", "%d", settings.port); APPEND_STAT("udpport", "%d", settings.udpport); APPEND_STAT("inter", "%s", settings.inter ? settings.inter : "NULL"); APPEND_STAT("verbosity", "%d", settings.verbose); APPEND_STAT("oldest", "%lu", (unsigned long)settings.oldest_live); APPEND_STAT("evictions", "%s", settings.evict_to_free ? 
"on" : "off"); APPEND_STAT("domain_socket", "%s", settings.socketpath ? settings.socketpath : "NULL"); APPEND_STAT("umask", "%o", settings.access); APPEND_STAT("growth_factor", "%.2f", settings.factor); APPEND_STAT("chunk_size", "%d", settings.chunk_size); APPEND_STAT("num_threads", "%d", settings.num_threads); APPEND_STAT("stat_key_prefix", "%c", settings.prefix_delimiter); APPEND_STAT("detail_enabled", "%s", settings.detail_enabled ? "yes" : "no"); APPEND_STAT("reqs_per_event", "%d", settings.reqs_per_event); APPEND_STAT("cas_enabled", "%s", settings.use_cas ? "yes" : "no"); APPEND_STAT("tcp_backlog", "%d", settings.backlog); APPEND_STAT("binding_protocol", "%s", prot_text(settings.binding_protocol)); APPEND_STAT("item_size_max", "%d", settings.item_size_max); } static void process_stat(conn *c, token_t *tokens, const size_t ntokens) { const char *subcommand = tokens[SUBCOMMAND_TOKEN].value; assert(c != NULL); if (ntokens < 2) { out_string(c, "CLIENT_ERROR bad command line"); return; } if (ntokens == 2) { server_stats(&append_stats, c); (void)get_stats(NULL, 0, &append_stats, c); } else if (strcmp(subcommand, "reset") == 0) { stats_reset(); out_string(c, "RESET"); return ; } else if (strcmp(subcommand, "detail") == 0) { /* NOTE: how to tackle detail with binary? 
*/ if (ntokens < 4) process_stats_detail(c, ""); /* outputs the error message */ else process_stats_detail(c, tokens[2].value); /* Output already generated */ return ; } else if (strcmp(subcommand, "settings") == 0) { process_stat_settings(&append_stats, c); } else if (strcmp(subcommand, "cachedump") == 0) { char *buf; unsigned int bytes, id, limit = 0; if (ntokens < 5) { out_string(c, "CLIENT_ERROR bad command line"); return; } if (!safe_strtoul(tokens[2].value, &id) || !safe_strtoul(tokens[3].value, &limit)) { out_string(c, "CLIENT_ERROR bad command line format"); return; } if (id >= POWER_LARGEST) { out_string(c, "CLIENT_ERROR Illegal slab id"); return; } buf = item_cachedump(id, limit, &bytes); write_and_free(c, buf, bytes); return ; } else { /* getting here means that the subcommand is either engine specific or is invalid. query the engine and see. */ if (get_stats(subcommand, strlen(subcommand), &append_stats, c)) { if (c->stats.buffer == NULL) { out_string(c, "SERVER_ERROR out of memory writing stats"); } else { write_and_free(c, c->stats.buffer, c->stats.offset); c->stats.buffer = NULL; } } else { out_string(c, "ERROR"); } return ; } /* append terminator and start the transfer */ append_stats(NULL, 0, NULL, 0, c); if (c->stats.buffer == NULL) { out_string(c, "SERVER_ERROR out of memory writing stats"); } else { write_and_free(c, c->stats.buffer, c->stats.offset); c->stats.buffer = NULL; } } /* ntokens is overwritten here... shrug.. 
*/ static inline void process_get_command(conn *c, token_t *tokens, size_t ntokens, bool return_cas) { char *key; size_t nkey; int i = 0; item *it; token_t *key_token = &tokens[KEY_TOKEN]; char *suffix; assert(c != NULL); do { while(key_token->length != 0) { key = key_token->value; nkey = key_token->length; if(nkey > KEY_MAX_LENGTH) { out_string(c, "CLIENT_ERROR bad command line format"); return; } it = item_get(key, nkey); if (settings.detail_enabled) { stats_prefix_record_get(key, nkey, NULL != it); } if (it) { if (i >= c->isize) { item **new_list = realloc(c->ilist, sizeof(item *) * c->isize * 2); if (new_list) { c->isize *= 2; c->ilist = new_list; } else { item_remove(it); break; } } /* * Construct the response. Each hit adds three elements to the * outgoing data list: * "VALUE " * key * " " + flags + " " + data length + "\r\n" + data (with \r\n) */ if (return_cas) { MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey, it->nbytes, ITEM_get_cas(it)); /* Goofy mid-flight realloc. */ if (i >= c->suffixsize) { char **new_suffix_list = realloc(c->suffixlist, sizeof(char *) * c->suffixsize * 2); if (new_suffix_list) { c->suffixsize *= 2; c->suffixlist = new_suffix_list; } else { item_remove(it); break; } } suffix = cache_alloc(c->thread->suffix_cache); if (suffix == NULL) { out_string(c, "SERVER_ERROR out of memory making CAS suffix"); item_remove(it); return; } *(c->suffixlist + i) = suffix; int suffix_len = snprintf(suffix, SUFFIX_SIZE, " %llu\r\n", (unsigned long long)ITEM_get_cas(it)); if (add_iov(c, "VALUE ", 6) != 0 || add_iov(c, ITEM_key(it), it->nkey) != 0 || add_iov(c, ITEM_suffix(it), it->nsuffix - 2) != 0 || add_iov(c, suffix, suffix_len) != 0 || add_iov(c, ITEM_data(it), it->nbytes) != 0) { item_remove(it); break; } } else { MEMCACHED_COMMAND_GET(c->sfd, ITEM_key(it), it->nkey, it->nbytes, ITEM_get_cas(it)); if (add_iov(c, "VALUE ", 6) != 0 || add_iov(c, ITEM_key(it), it->nkey) != 0 || add_iov(c, ITEM_suffix(it), it->nsuffix + it->nbytes) != 0) { 
item_remove(it); break; } } if (settings.verbose > 1) fprintf(stderr, ">%d sending key %s\n", c->sfd, ITEM_key(it)); /* item_get() has incremented it->refcount for us */ pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.slab_stats[it->slabs_clsid].get_hits++; c->thread->stats.get_cmds++; pthread_mutex_unlock(&c->thread->stats.mutex); item_update(it); *(c->ilist + i) = it; i++; } else { pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.get_misses++; pthread_mutex_unlock(&c->thread->stats.mutex); MEMCACHED_COMMAND_GET(c->sfd, key, nkey, -1, 0); } key_token++; } /* * If the command string hasn't been fully processed, get the next set * of tokens. */ if(key_token->value != NULL) { ntokens = tokenize_command(key_token->value, tokens, MAX_TOKENS); key_token = tokens; } } while(key_token->value != NULL); c->icurr = c->ilist; c->ileft = i; if (return_cas) { c->suffixcurr = c->suffixlist; c->suffixleft = i; } if (settings.verbose > 1) fprintf(stderr, ">%d END\n", c->sfd); /* If the loop was terminated because of out-of-memory, it is not reliable to add END\r\n to the buffer, because it might not end in \r\n. So we send SERVER_ERROR instead. */ if (key_token->value != NULL || add_iov(c, "END\r\n", 5) != 0 || (IS_UDP(c->transport) && build_udp_headers(c) != 0)) { out_string(c, "SERVER_ERROR out of memory writing get response"); } else { conn_set_state(c, conn_mwrite); c->msgcurr = 0; } return; } static void process_update_command(conn *c, token_t *tokens, const size_t ntokens, int comm, bool handle_cas) { char *key; size_t nkey; unsigned int flags; int32_t exptime_int = 0; time_t exptime; int vlen; uint64_t req_cas_id=0; item *it; assert(c != NULL); set_noreply_maybe(c, tokens, ntokens); if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) { out_string(c, "CLIENT_ERROR bad command line format"); return; } key = tokens[KEY_TOKEN].value; nkey = tokens[KEY_TOKEN].length; if (! 
(safe_strtoul(tokens[2].value, (uint32_t *)&flags) && safe_strtol(tokens[3].value, &exptime_int) && safe_strtol(tokens[4].value, (int32_t *)&vlen))) { out_string(c, "CLIENT_ERROR bad command line format"); return; } /* Ubuntu 8.04 breaks when I pass exptime to safe_strtol */ exptime = exptime_int; // does cas value exist? if (handle_cas) { if (!safe_strtoull(tokens[5].value, &req_cas_id)) { out_string(c, "CLIENT_ERROR bad command line format"); return; } } vlen += 2; if (vlen < 0 || vlen - 2 < 0) { out_string(c, "CLIENT_ERROR bad command line format"); return; } if (settings.detail_enabled) { stats_prefix_record_set(key, nkey); } it = item_alloc(key, nkey, flags, realtime(exptime), vlen); if (it == 0) { if (! item_size_ok(nkey, flags, vlen)) out_string(c, "SERVER_ERROR object too large for cache"); else out_string(c, "SERVER_ERROR out of memory storing object"); /* swallow the data line */ c->write_and_go = conn_swallow; c->sbytes = vlen; /* Avoid stale data persisting in cache because we failed alloc. * Unacceptable for SET. Anywhere else too? 
*/ if (comm == NREAD_SET) { it = item_get(key, nkey); if (it) { item_unlink(it); item_remove(it); } } return; } ITEM_set_cas(it, req_cas_id); c->item = it; c->ritem = ITEM_data(it); c->rlbytes = it->nbytes; c->cmd = comm; conn_set_state(c, conn_nread); } static void process_arithmetic_command(conn *c, token_t *tokens, const size_t ntokens, const bool incr) { char temp[INCR_MAX_STORAGE_LEN]; item *it; uint64_t delta; char *key; size_t nkey; assert(c != NULL); set_noreply_maybe(c, tokens, ntokens); if (tokens[KEY_TOKEN].length > KEY_MAX_LENGTH) { out_string(c, "CLIENT_ERROR bad command line format"); return; } key = tokens[KEY_TOKEN].value; nkey = tokens[KEY_TOKEN].length; if (!safe_strtoull(tokens[2].value, &delta)) { out_string(c, "CLIENT_ERROR invalid numeric delta argument"); return; } it = item_get(key, nkey); if (!it) { pthread_mutex_lock(&c->thread->stats.mutex); if (incr) { c->thread->stats.incr_misses++; } else { c->thread->stats.decr_misses++; } pthread_mutex_unlock(&c->thread->stats.mutex); out_string(c, "NOT_FOUND"); return; } switch(add_delta(c, it, incr, delta, temp)) { case OK: out_string(c, temp); break; case NON_NUMERIC: out_string(c, "CLIENT_ERROR cannot increment or decrement non-numeric value"); break; case EOM: out_string(c, "SERVER_ERROR out of memory"); break; } item_remove(it); /* release our reference */ } /* * adds a delta value to a numeric item. * * c connection requesting the operation * it item to adjust * incr true to increment value, false to decrement * delta amount to adjust value by * buf buffer for response string * * returns a response string to send back to the client. 
*/ enum delta_result_type do_add_delta(conn *c, item *it, const bool incr, const int64_t delta, char *buf) { char *ptr; uint64_t value; int res; ptr = ITEM_data(it); if (!safe_strtoull(ptr, &value)) { return NON_NUMERIC; } if (incr) { value += delta; MEMCACHED_COMMAND_INCR(c->sfd, ITEM_key(it), it->nkey, value); } else { if(delta > value) { value = 0; } else { value -= delta; } MEMCACHED_COMMAND_DECR(c->sfd, ITEM_key(it), it->nkey, value); } pthread_mutex_lock(&c->thread->stats.mutex); if (incr) { c->thread->stats.slab_stats[it->slabs_clsid].incr_hits++; } else { c->thread->stats.slab_stats[it->slabs_clsid].decr_hits++; } pthread_mutex_unlock(&c->thread->stats.mutex); snprintf(buf, INCR_MAX_STORAGE_LEN, "%llu", (unsigned long long)value); res = strlen(buf); if (res + 2 > it->nbytes) { /* need to realloc */ item *new_it; new_it = do_item_alloc(ITEM_key(it), it->nkey, atoi(ITEM_suffix(it) + 1), it->exptime, res + 2 ); if (new_it == 0) { return EOM; } memcpy(ITEM_data(new_it), buf, res); memcpy(ITEM_data(new_it) + res, "\r\n", 2); item_replace(it, new_it); do_item_remove(new_it); /* release our reference */ } else { /* replace in-place */ /* When changing the value without replacing the item, we need to update the CAS on the existing item. */ ITEM_set_cas(it, (settings.use_cas) ? 
get_cas_id() : 0); memcpy(ITEM_data(it), buf, res); memset(ITEM_data(it) + res, ' ', it->nbytes - res - 2); } return OK; } static void process_delete_command(conn *c, token_t *tokens, const size_t ntokens) { char *key; size_t nkey; item *it; assert(c != NULL); set_noreply_maybe(c, tokens, ntokens); key = tokens[KEY_TOKEN].value; nkey = tokens[KEY_TOKEN].length; if(nkey > KEY_MAX_LENGTH) { out_string(c, "CLIENT_ERROR bad command line format"); return; } if (settings.detail_enabled) { stats_prefix_record_delete(key, nkey); } it = item_get(key, nkey); if (it) { MEMCACHED_COMMAND_DELETE(c->sfd, ITEM_key(it), it->nkey); pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.slab_stats[it->slabs_clsid].delete_hits++; pthread_mutex_unlock(&c->thread->stats.mutex); item_unlink(it); item_remove(it); /* release our reference */ out_string(c, "DELETED"); } else { pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.delete_misses++; pthread_mutex_unlock(&c->thread->stats.mutex); out_string(c, "NOT_FOUND"); } } static void process_verbosity_command(conn *c, token_t *tokens, const size_t ntokens) { unsigned int level; assert(c != NULL); set_noreply_maybe(c, tokens, ntokens); level = strtoul(tokens[1].value, NULL, 10); settings.verbose = level > MAX_VERBOSITY_LEVEL ? MAX_VERBOSITY_LEVEL : level; out_string(c, "OK"); return; } static void process_command(conn *c, char *command) { token_t tokens[MAX_TOKENS]; size_t ntokens; int comm; assert(c != NULL); MEMCACHED_PROCESS_COMMAND_START(c->sfd, c->rcurr, c->rbytes); if (settings.verbose > 1) fprintf(stderr, "<%d %s\n", c->sfd, command); /* * for commands set/add/replace, we build an item and read the data * directly into it, then continue in nread_complete(). 
*/ c->msgcurr = 0; c->msgused = 0; c->iovused = 0; if (add_msghdr(c) != 0) { out_string(c, "SERVER_ERROR out of memory preparing response"); return; } ntokens = tokenize_command(command, tokens, MAX_TOKENS); if (ntokens >= 3 && ((strcmp(tokens[COMMAND_TOKEN].value, "get") == 0) || (strcmp(tokens[COMMAND_TOKEN].value, "bget") == 0))) { process_get_command(c, tokens, ntokens, false); } else if ((ntokens == 6 || ntokens == 7) && ((strcmp(tokens[COMMAND_TOKEN].value, "add") == 0 && (comm = NREAD_ADD)) || (strcmp(tokens[COMMAND_TOKEN].value, "set") == 0 && (comm = NREAD_SET)) || (strcmp(tokens[COMMAND_TOKEN].value, "replace") == 0 && (comm = NREAD_REPLACE)) || (strcmp(tokens[COMMAND_TOKEN].value, "prepend") == 0 && (comm = NREAD_PREPEND)) || (strcmp(tokens[COMMAND_TOKEN].value, "append") == 0 && (comm = NREAD_APPEND)) )) { process_update_command(c, tokens, ntokens, comm, false); } else if ((ntokens == 7 || ntokens == 8) && (strcmp(tokens[COMMAND_TOKEN].value, "cas") == 0 && (comm = NREAD_CAS))) { process_update_command(c, tokens, ntokens, comm, true); } else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "incr") == 0)) { process_arithmetic_command(c, tokens, ntokens, 1); } else if (ntokens >= 3 && (strcmp(tokens[COMMAND_TOKEN].value, "gets") == 0)) { process_get_command(c, tokens, ntokens, true); } else if ((ntokens == 4 || ntokens == 5) && (strcmp(tokens[COMMAND_TOKEN].value, "decr") == 0)) { process_arithmetic_command(c, tokens, ntokens, 0); } else if (ntokens >= 3 && ntokens <= 4 && (strcmp(tokens[COMMAND_TOKEN].value, "delete") == 0)) { process_delete_command(c, tokens, ntokens); } else if (ntokens >= 2 && (strcmp(tokens[COMMAND_TOKEN].value, "stats") == 0)) { process_stat(c, tokens, ntokens); } else if (ntokens >= 2 && ntokens <= 4 && (strcmp(tokens[COMMAND_TOKEN].value, "flush_all") == 0)) { time_t exptime = 0; set_current_time(); set_noreply_maybe(c, tokens, ntokens); pthread_mutex_lock(&c->thread->stats.mutex); 
c->thread->stats.flush_cmds++; pthread_mutex_unlock(&c->thread->stats.mutex); if(ntokens == (c->noreply ? 3 : 2)) { settings.oldest_live = current_time - 1; item_flush_expired(); out_string(c, "OK"); return; } exptime = strtol(tokens[1].value, NULL, 10); if(errno == ERANGE) { out_string(c, "CLIENT_ERROR bad command line format"); return; } /* If exptime is zero realtime() would return zero too, and realtime(exptime) - 1 would overflow to the max unsigned value. So we process exptime == 0 the same way we do when no delay is given at all. */ if (exptime > 0) settings.oldest_live = realtime(exptime) - 1; else /* exptime == 0 */ settings.oldest_live = current_time - 1; item_flush_expired(); out_string(c, "OK"); return; } else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "version") == 0)) { out_string(c, "VERSION " VERSION); } else if (ntokens == 2 && (strcmp(tokens[COMMAND_TOKEN].value, "quit") == 0)) { conn_set_state(c, conn_closing); } else if ((ntokens == 3 || ntokens == 4) && (strcmp(tokens[COMMAND_TOKEN].value, "verbosity") == 0)) { process_verbosity_command(c, tokens, ntokens); } else { out_string(c, "ERROR"); } return; } /* * if we have a complete line in the buffer, process it. */ static int try_read_command(conn *c) { assert(c != NULL); assert(c->rcurr <= (c->rbuf + c->rsize)); assert(c->rbytes > 0); if (c->protocol == negotiating_prot || c->transport == udp_transport) { if ((unsigned char)c->rbuf[0] == (unsigned char)PROTOCOL_BINARY_REQ) { c->protocol = binary_prot; } else { c->protocol = ascii_prot; } if (settings.verbose > 1) { fprintf(stderr, "%d: Client using the %s protocol\n", c->sfd, prot_text(c->protocol)); } } if (c->protocol == binary_prot) { /* Do we have the complete packet header? */ if (c->rbytes < sizeof(c->binary_header)) { /* need more data! 
*/ return 0; } else { #ifdef NEED_ALIGN if (((long)(c->rcurr)) % 8 != 0) { /* must realign input buffer */ memmove(c->rbuf, c->rcurr, c->rbytes); c->rcurr = c->rbuf; if (settings.verbose > 1) { fprintf(stderr, "%d: Realign input buffer\n", c->sfd); } } #endif protocol_binary_request_header* req; req = (protocol_binary_request_header*)c->rcurr; if (settings.verbose > 1) { /* Dump the packet before we convert it to host order */ int ii; fprintf(stderr, "<%d Read binary protocol data:", c->sfd); for (ii = 0; ii < sizeof(req->bytes); ++ii) { if (ii % 4 == 0) { fprintf(stderr, "\n<%d ", c->sfd); } fprintf(stderr, " 0x%02x", req->bytes[ii]); } fprintf(stderr, "\n"); } c->binary_header = *req; c->binary_header.request.keylen = ntohs(req->request.keylen); c->binary_header.request.bodylen = ntohl(req->request.bodylen); c->binary_header.request.cas = ntohll(req->request.cas); if (c->binary_header.request.magic != PROTOCOL_BINARY_REQ) { if (settings.verbose) { fprintf(stderr, "Invalid magic: %x\n", c->binary_header.request.magic); } conn_set_state(c, conn_closing); return -1; } c->msgcurr = 0; c->msgused = 0; c->iovused = 0; if (add_msghdr(c) != 0) { out_string(c, "SERVER_ERROR out of memory"); return 0; } c->cmd = c->binary_header.request.opcode; c->keylen = c->binary_header.request.keylen; c->opaque = c->binary_header.request.opaque; /* clear the returned cas value */ c->cas = 0; dispatch_bin_command(c); c->rbytes -= sizeof(c->binary_header); c->rcurr += sizeof(c->binary_header); } } else { char *el, *cont; if (c->rbytes == 0) return 0; el = memchr(c->rcurr, '\n', c->rbytes); if (!el) return 0; cont = el + 1; if ((el - c->rcurr) > 1 && *(el - 1) == '\r') { el--; } *el = '\0'; assert(cont <= (c->rcurr + c->rbytes)); process_command(c, c->rcurr); c->rbytes -= (cont - c->rcurr); c->rcurr = cont; assert(c->rcurr <= (c->rbuf + c->rsize)); } return 1; } /* * read a UDP request. 
*/
/*
 * Receive one UDP datagram into the connection's read buffer.
 *
 * The first 8 bytes of each memcached UDP packet are a frame header:
 * bytes 0-1 request id, 2-3 sequence number, 4-5 total datagram count,
 * 6-7 reserved.  Only single-datagram requests (seq 0 of 1) are accepted.
 *
 * On success the header is stripped, the payload is shifted to the start
 * of rbuf, and READ_DATA_RECEIVED is returned; otherwise
 * READ_NO_DATA_RECEIVED.
 */
static enum try_read_result try_read_udp(conn *c) {
    int res;

    assert(c != NULL);

    c->request_addr_size = sizeof(c->request_addr);
    res = recvfrom(c->sfd, c->rbuf, c->rsize,
                   0, &c->request_addr, &c->request_addr_size);
    if (res > 8) {
        unsigned char *buf = (unsigned char *)c->rbuf;
        pthread_mutex_lock(&c->thread->stats.mutex);
        c->thread->stats.bytes_read += res;
        pthread_mutex_unlock(&c->thread->stats.mutex);

        /* Beginning of UDP packet is the request ID; save it. */
        c->request_id = buf[0] * 256 + buf[1];

        /* If this is a multi-packet request, drop it. */
        if (buf[4] != 0 || buf[5] != 1) {
            out_string(c, "SERVER_ERROR multi-packet request not supported");
            return READ_NO_DATA_RECEIVED;
        }

        /* Don't care about any of the rest of the header. */
        res -= 8;
        memmove(c->rbuf, c->rbuf + 8, res);

        c->rbytes += res;
        c->rcurr = c->rbuf;
        return READ_DATA_RECEIVED;
    }
    /* Datagrams of 8 bytes or less (including recvfrom errors, res < 0)
       carry no payload and are ignored. */
    return READ_NO_DATA_RECEIVED;
}

/*
 * read from network as much as we can, handle buffer overflow and connection
 * close.
 * before reading, move the remaining incomplete fragment of a command
 * (if any) to the beginning of the buffer.
* @return enum try_read_result */ static enum try_read_result try_read_network(conn *c) { enum try_read_result gotdata = READ_NO_DATA_RECEIVED; int res; assert(c != NULL); if (c->rcurr != c->rbuf) { if (c->rbytes != 0) /* otherwise there's nothing to copy */ memmove(c->rbuf, c->rcurr, c->rbytes); c->rcurr = c->rbuf; } while (1) { if (c->rbytes >= c->rsize) { char *new_rbuf = realloc(c->rbuf, c->rsize * 2); if (!new_rbuf) { if (settings.verbose > 0) fprintf(stderr, "Couldn't realloc input buffer\n"); c->rbytes = 0; /* ignore what we read */ out_string(c, "SERVER_ERROR out of memory reading request"); c->write_and_go = conn_closing; return READ_MEMORY_ERROR; } c->rcurr = c->rbuf = new_rbuf; c->rsize *= 2; } int avail = c->rsize - c->rbytes; res = read(c->sfd, c->rbuf + c->rbytes, avail); if (res > 0) { pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.bytes_read += res; pthread_mutex_unlock(&c->thread->stats.mutex); gotdata = READ_DATA_RECEIVED; c->rbytes += res; if (res == avail) { continue; } else { break; } } if (res == 0) { return READ_ERROR; } if (res == -1) { if (errno == EAGAIN || errno == EWOULDBLOCK) { break; } return READ_ERROR; } } return gotdata; } static bool update_event(conn *c, const int new_flags) { assert(c != NULL); struct event_base *base = c->event.ev_base; if (c->ev_flags == new_flags) return true; if (event_del(&c->event) == -1) return false; event_set(&c->event, c->sfd, new_flags, event_handler, (void *)c); event_base_set(base, &c->event); c->ev_flags = new_flags; if (event_add(&c->event, 0) == -1) return false; return true; } /* * Sets whether we are listening for new connections or not. 
 */
/*
 * Enable or disable accepting of new client connections on every listening
 * socket.  Disabling sets a zero backlog via listen() and records the event
 * in the global stats; enabling restores the configured backlog.
 */
void do_accept_new_conns(const bool do_accept) {
    conn *next;

    for (next = listen_conn; next; next = next->next) {
        if (do_accept) {
            update_event(next, EV_READ | EV_PERSIST);
            if (listen(next->sfd, settings.backlog) != 0) {
                perror("listen");
            }
        }
        else {
            /* Stop polling for readability and shrink the kernel backlog
               to zero so new connects queue up (or fail) in the kernel. */
            update_event(next, 0);
            if (listen(next->sfd, 0) != 0) {
                perror("listen");
            }
        }
    }

    if (do_accept) {
        STATS_LOCK();
        stats.accepting_conns = true;
        STATS_UNLOCK();
    } else {
        STATS_LOCK();
        stats.accepting_conns = false;
        stats.listen_disabled_num++;
        STATS_UNLOCK();
    }
}

/*
 * Transmit the next chunk of data from our list of msgbuf structures.
 *
 * Returns:
 *   TRANSMIT_COMPLETE   All done writing.
 *   TRANSMIT_INCOMPLETE More data remaining to write.
 *   TRANSMIT_SOFT_ERROR Can't write any more right now.
 *   TRANSMIT_HARD_ERROR Can't write (c->state is set to conn_closing)
 */
static enum transmit_result transmit(conn *c) {
    assert(c != NULL);

    if (c->msgcurr < c->msgused &&
            c->msglist[c->msgcurr].msg_iovlen == 0) {
        /* Finished writing the current msg; advance to the next. */
        c->msgcurr++;
    }
    if (c->msgcurr < c->msgused) {
        ssize_t res;
        struct msghdr *m = &c->msglist[c->msgcurr];

        res = sendmsg(c->sfd, m, 0);
        if (res > 0) {
            pthread_mutex_lock(&c->thread->stats.mutex);
            c->thread->stats.bytes_written += res;
            pthread_mutex_unlock(&c->thread->stats.mutex);

            /* We've written some of the data. Remove the completed
               iovec entries from the list of pending writes. */
            while (m->msg_iovlen > 0 && res >= m->msg_iov->iov_len) {
                res -= m->msg_iov->iov_len;
                m->msg_iovlen--;
                m->msg_iov++;
            }

            /* Might have written just part of the last iovec entry;
               adjust it so the next write will do the rest.
*/ if (res > 0) { m->msg_iov->iov_base = (caddr_t)m->msg_iov->iov_base + res; m->msg_iov->iov_len -= res; } return TRANSMIT_INCOMPLETE; } if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) { if (!update_event(c, EV_WRITE | EV_PERSIST)) { if (settings.verbose > 0) fprintf(stderr, "Couldn't update event\n"); conn_set_state(c, conn_closing); return TRANSMIT_HARD_ERROR; } return TRANSMIT_SOFT_ERROR; } /* if res == 0 or res == -1 and error is not EAGAIN or EWOULDBLOCK, we have a real error, on which we close the connection */ if (settings.verbose > 0) perror("Failed to write, and not due to blocking"); if (IS_UDP(c->transport)) conn_set_state(c, conn_read); else conn_set_state(c, conn_closing); return TRANSMIT_HARD_ERROR; } else { return TRANSMIT_COMPLETE; } } static void drive_machine(conn *c) { bool stop = false; int sfd, flags = 1; socklen_t addrlen; struct sockaddr_storage addr; int nreqs = settings.reqs_per_event; int res; assert(c != NULL); while (!stop) { switch(c->state) { case conn_listening: addrlen = sizeof(addr); if ((sfd = accept(c->sfd, (struct sockaddr *)&addr, &addrlen)) == -1) { if (errno == EAGAIN || errno == EWOULDBLOCK) { /* these are transient, so don't log anything */ stop = true; } else if (errno == EMFILE) { if (settings.verbose > 0) fprintf(stderr, "Too many open connections\n"); accept_new_conns(false); stop = true; } else { perror("accept()"); stop = true; } break; } if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 || fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) { perror("setting O_NONBLOCK"); close(sfd); break; } dispatch_conn_new(sfd, conn_new_cmd, EV_READ | EV_PERSIST, DATA_BUFFER_SIZE, tcp_transport); stop = true; break; case conn_waiting: if (!update_event(c, EV_READ | EV_PERSIST)) { if (settings.verbose > 0) fprintf(stderr, "Couldn't update event\n"); conn_set_state(c, conn_closing); break; } conn_set_state(c, conn_read); stop = true; break; case conn_read: res = IS_UDP(c->transport) ? 
try_read_udp(c) : try_read_network(c); switch (res) { case READ_NO_DATA_RECEIVED: conn_set_state(c, conn_waiting); break; case READ_DATA_RECEIVED: conn_set_state(c, conn_parse_cmd); break; case READ_ERROR: conn_set_state(c, conn_closing); break; case READ_MEMORY_ERROR: /* Failed to allocate more memory */ /* State already set by try_read_network */ break; } break; case conn_parse_cmd : if (try_read_command(c) == 0) { /* wee need more data! */ conn_set_state(c, conn_waiting); } break; case conn_new_cmd: /* Only process nreqs at a time to avoid starving other connections */ --nreqs; if (nreqs >= 0) { reset_cmd_handler(c); } else { pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.conn_yields++; pthread_mutex_unlock(&c->thread->stats.mutex); if (c->rbytes > 0) { /* We have already read in data into the input buffer, so libevent will most likely not signal read events on the socket (unless more data is available. As a hack we should just put in a request to write data, because that should be possible ;-) */ if (!update_event(c, EV_WRITE | EV_PERSIST)) { if (settings.verbose > 0) fprintf(stderr, "Couldn't update event\n"); conn_set_state(c, conn_closing); } } stop = true; } break; case conn_nread: if (c->rlbytes == 0) { complete_nread(c); break; } /* first check if we have leftovers in the conn_read buffer */ if (c->rbytes > 0) { int tocopy = c->rbytes > c->rlbytes ? 
c->rlbytes : c->rbytes; if (c->ritem != c->rcurr) { memmove(c->ritem, c->rcurr, tocopy); } c->ritem += tocopy; c->rlbytes -= tocopy; c->rcurr += tocopy; c->rbytes -= tocopy; if (c->rlbytes == 0) { break; } } /* now try reading from the socket */ res = read(c->sfd, c->ritem, c->rlbytes); if (res > 0) { pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.bytes_read += res; pthread_mutex_unlock(&c->thread->stats.mutex); if (c->rcurr == c->ritem) { c->rcurr += res; } c->ritem += res; c->rlbytes -= res; break; } if (res == 0) { /* end of stream */ conn_set_state(c, conn_closing); break; } if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) { if (!update_event(c, EV_READ | EV_PERSIST)) { if (settings.verbose > 0) fprintf(stderr, "Couldn't update event\n"); conn_set_state(c, conn_closing); break; } stop = true; break; } /* otherwise we have a real error, on which we close the connection */ if (settings.verbose > 0) { fprintf(stderr, "Failed to read, and not due to blocking:\n" "errno: %d %s \n" "rcurr=%lx ritem=%lx rbuf=%lx rlbytes=%d rsize=%d\n", errno, strerror(errno), (long)c->rcurr, (long)c->ritem, (long)c->rbuf, (int)c->rlbytes, (int)c->rsize); } conn_set_state(c, conn_closing); break; case conn_swallow: /* we are reading sbytes and throwing them away */ if (c->sbytes == 0) { conn_set_state(c, conn_new_cmd); break; } /* first check if we have leftovers in the conn_read buffer */ if (c->rbytes > 0) { int tocopy = c->rbytes > c->sbytes ? c->sbytes : c->rbytes; c->sbytes -= tocopy; c->rcurr += tocopy; c->rbytes -= tocopy; break; } /* now try reading from the socket */ res = read(c->sfd, c->rbuf, c->rsize > c->sbytes ? 
c->sbytes : c->rsize); if (res > 0) { pthread_mutex_lock(&c->thread->stats.mutex); c->thread->stats.bytes_read += res; pthread_mutex_unlock(&c->thread->stats.mutex); c->sbytes -= res; break; } if (res == 0) { /* end of stream */ conn_set_state(c, conn_closing); break; } if (res == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) { if (!update_event(c, EV_READ | EV_PERSIST)) { if (settings.verbose > 0) fprintf(stderr, "Couldn't update event\n"); conn_set_state(c, conn_closing); break; } stop = true; break; } /* otherwise we have a real error, on which we close the connection */ if (settings.verbose > 0) fprintf(stderr, "Failed to read, and not due to blocking\n"); conn_set_state(c, conn_closing); break; case conn_write: /* * We want to write out a simple response. If we haven't already, * assemble it into a msgbuf list (this will be a single-entry * list for TCP or a two-entry list for UDP). */ if (c->iovused == 0 || (IS_UDP(c->transport) && c->iovused == 1)) { if (add_iov(c, c->wcurr, c->wbytes) != 0) { if (settings.verbose > 0) fprintf(stderr, "Couldn't build response\n"); conn_set_state(c, conn_closing); break; } } /* fall through... 
*/ case conn_mwrite: if (IS_UDP(c->transport) && c->msgcurr == 0 && build_udp_headers(c) != 0) { if (settings.verbose > 0) fprintf(stderr, "Failed to build UDP headers\n"); conn_set_state(c, conn_closing); break; } switch (transmit(c)) { case TRANSMIT_COMPLETE: if (c->state == conn_mwrite) { while (c->ileft > 0) { item *it = *(c->icurr); assert((it->it_flags & ITEM_SLABBED) == 0); item_remove(it); c->icurr++; c->ileft--; } while (c->suffixleft > 0) { char *suffix = *(c->suffixcurr); cache_free(c->thread->suffix_cache, suffix); c->suffixcurr++; c->suffixleft--; } /* XXX: I don't know why this wasn't the general case */ if(c->protocol == binary_prot) { conn_set_state(c, c->write_and_go); } else { conn_set_state(c, conn_new_cmd); } } else if (c->state == conn_write) { if (c->write_and_free) { free(c->write_and_free); c->write_and_free = 0; } conn_set_state(c, c->write_and_go); } else { if (settings.verbose > 0) fprintf(stderr, "Unexpected state %d\n", c->state); conn_set_state(c, conn_closing); } break; case TRANSMIT_INCOMPLETE: case TRANSMIT_HARD_ERROR: break; /* Continue in state machine. 
*/ case TRANSMIT_SOFT_ERROR: stop = true; break; } break; case conn_closing: if (IS_UDP(c->transport)) conn_cleanup(c); else conn_close(c); stop = true; break; case conn_max_state: assert(false); break; } } return; } void event_handler(const int fd, const short which, void *arg) { conn *c; c = (conn *)arg; assert(c != NULL); c->which = which; /* sanity */ if (fd != c->sfd) { if (settings.verbose > 0) fprintf(stderr, "Catastrophic: event fd doesn't match conn fd!\n"); conn_close(c); return; } drive_machine(c); /* wait for next event */ return; } static int new_socket(struct addrinfo *ai) { int sfd; int flags; if ((sfd = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol)) == -1) { return -1; } if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 || fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) { perror("setting O_NONBLOCK"); close(sfd); return -1; } return sfd; } /* * Sets a socket's send buffer size to the maximum allowed by the system. */ static void maximize_sndbuf(const int sfd) { socklen_t intsize = sizeof(int); int last_good = 0; int min, max, avg; int old_size; /* Start with the default size. */ if (getsockopt(sfd, SOL_SOCKET, SO_SNDBUF, &old_size, &intsize) != 0) { if (settings.verbose > 0) perror("getsockopt(SO_SNDBUF)"); return; } /* Binary-search for the real maximum. */ min = old_size; max = MAX_SENDBUF_SIZE; while (min <= max) { avg = ((unsigned int)(min + max)) / 2; if (setsockopt(sfd, SOL_SOCKET, SO_SNDBUF, (void *)&avg, intsize) == 0) { last_good = avg; min = avg + 1; } else { max = avg - 1; } } if (settings.verbose > 1) fprintf(stderr, "<%d send buffer was %d, now %d\n", sfd, old_size, last_good); } /** * Create a socket and bind it to a specific port number * @param port the port number to bind to * @param transport the transport protocol (TCP / UDP) * @param portnumber_file A filepointer to write the port numbers to * when they are successfully added to the list of ports we * listen on. 
*/ static int server_socket(int port, enum network_transport transport, FILE *portnumber_file) { int sfd; struct linger ling = {0, 0}; struct addrinfo *ai; struct addrinfo *next; struct addrinfo hints = { .ai_flags = AI_PASSIVE, .ai_family = AF_UNSPEC }; char port_buf[NI_MAXSERV]; int error; int success = 0; int flags =1; hints.ai_socktype = IS_UDP(transport) ? SOCK_DGRAM : SOCK_STREAM; if (port == -1) { port = 0; } snprintf(port_buf, sizeof(port_buf), "%d", port); error= getaddrinfo(settings.inter, port_buf, &hints, &ai); if (error != 0) { if (error != EAI_SYSTEM) fprintf(stderr, "getaddrinfo(): %s\n", gai_strerror(error)); else perror("getaddrinfo()"); return 1; } for (next= ai; next; next= next->ai_next) { conn *listen_conn_add; if ((sfd = new_socket(next)) == -1) { /* getaddrinfo can return "junk" addresses, * we make sure at least one works before erroring. */ continue; } #ifdef IPV6_V6ONLY if (next->ai_family == AF_INET6) { error = setsockopt(sfd, IPPROTO_IPV6, IPV6_V6ONLY, (char *) &flags, sizeof(flags)); if (error != 0) { perror("setsockopt"); close(sfd); continue; } } #endif setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void *)&flags, sizeof(flags)); if (IS_UDP(transport)) { maximize_sndbuf(sfd); } else { error = setsockopt(sfd, SOL_SOCKET, SO_KEEPALIVE, (void *)&flags, sizeof(flags)); if (error != 0) perror("setsockopt"); error = setsockopt(sfd, SOL_SOCKET, SO_LINGER, (void *)&ling, sizeof(ling)); if (error != 0) perror("setsockopt"); error = setsockopt(sfd, IPPROTO_TCP, TCP_NODELAY, (void *)&flags, sizeof(flags)); if (error != 0) perror("setsockopt"); } if (bind(sfd, next->ai_addr, next->ai_addrlen) == -1) { if (errno != EADDRINUSE) { perror("bind()"); close(sfd); freeaddrinfo(ai); return 1; } close(sfd); continue; } else { success++; if (!IS_UDP(transport) && listen(sfd, settings.backlog) == -1) { perror("listen()"); close(sfd); freeaddrinfo(ai); return 1; } if (portnumber_file != NULL && (next->ai_addr->sa_family == AF_INET || next->ai_addr->sa_family == 
AF_INET6)) { union { struct sockaddr_in in; struct sockaddr_in6 in6; } my_sockaddr; socklen_t len = sizeof(my_sockaddr); if (getsockname(sfd, (struct sockaddr*)&my_sockaddr, &len)==0) { if (next->ai_addr->sa_family == AF_INET) { fprintf(portnumber_file, "%s INET: %u\n", IS_UDP(transport) ? "UDP" : "TCP", ntohs(my_sockaddr.in.sin_port)); } else { fprintf(portnumber_file, "%s INET6: %u\n", IS_UDP(transport) ? "UDP" : "TCP", ntohs(my_sockaddr.in6.sin6_port)); } } } } if (IS_UDP(transport)) { int c; for (c = 0; c < settings.num_threads; c++) { /* this is guaranteed to hit all threads because we round-robin */ dispatch_conn_new(sfd, conn_read, EV_READ | EV_PERSIST, UDP_READ_BUFFER_SIZE, transport); } } else { if (!(listen_conn_add = conn_new(sfd, conn_listening, EV_READ | EV_PERSIST, 1, transport, main_base))) { fprintf(stderr, "failed to create listening connection\n"); exit(EXIT_FAILURE); } listen_conn_add->next = listen_conn; listen_conn = listen_conn_add; } } freeaddrinfo(ai); /* Return zero iff we detected no errors in starting up connections */ return success == 0; } static int new_socket_unix(void) { int sfd; int flags; if ((sfd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1) { perror("socket()"); return -1; } if ((flags = fcntl(sfd, F_GETFL, 0)) < 0 || fcntl(sfd, F_SETFL, flags | O_NONBLOCK) < 0) { perror("setting O_NONBLOCK"); close(sfd); return -1; } return sfd; } static int server_socket_unix(const char *path, int access_mask) { int sfd; struct linger ling = {0, 0}; struct sockaddr_un addr; struct stat tstat; int flags =1; int old_umask; if (!path) { return 1; } if ((sfd = new_socket_unix()) == -1) { return 1; } /* * Clean up a previous socket file if we left it around */ if (lstat(path, &tstat) == 0) { if (S_ISSOCK(tstat.st_mode)) unlink(path); } setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void *)&flags, sizeof(flags)); setsockopt(sfd, SOL_SOCKET, SO_KEEPALIVE, (void *)&flags, sizeof(flags)); setsockopt(sfd, SOL_SOCKET, SO_LINGER, (void *)&ling, sizeof(ling)); /* * 
the memset call clears nonstandard fields in some impementations * that otherwise mess things up. */ memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1); assert(strcmp(addr.sun_path, path) == 0); old_umask = umask( ~(access_mask&0777)); if (bind(sfd, (struct sockaddr *)&addr, sizeof(addr)) == -1) { perror("bind()"); close(sfd); umask(old_umask); return 1; } umask(old_umask); if (listen(sfd, settings.backlog) == -1) { perror("listen()"); close(sfd); return 1; } if (!(listen_conn = conn_new(sfd, conn_listening, EV_READ | EV_PERSIST, 1, local_transport, main_base))) { fprintf(stderr, "failed to create listening connection\n"); exit(EXIT_FAILURE); } return 0; } /* * We keep the current time of day in a global variable that's updated by a * timer event. This saves us a bunch of time() system calls (we really only * need to get the time once a second, whereas there can be tens of thousands * of requests a second) and allows us to use server-start-relative timestamps * rather than absolute UNIX timestamps, a space savings on systems where * sizeof(time_t) > sizeof(unsigned int). */ volatile rel_time_t current_time; static struct event clockevent; /* time-sensitive callers can call it by hand with this, outside the normal ever-1-second timer */ static void set_current_time(void) { struct timeval timer; gettimeofday(&timer, NULL); current_time = (rel_time_t) (timer.tv_sec - process_started); } static void clock_handler(const int fd, const short which, void *arg) { struct timeval t = {.tv_sec = 1, .tv_usec = 0}; static bool initialized = false; if (initialized) { /* only delete the event if it's actually there. 
*/ evtimer_del(&clockevent); } else { initialized = true; } evtimer_set(&clockevent, clock_handler, 0); event_base_set(main_base, &clockevent); evtimer_add(&clockevent, &t); set_current_time(); } static void usage(void) { printf(PACKAGE " " VERSION "\n"); printf("-p <num> TCP port number to listen on (default: 11211)\n" "-U <num> UDP port number to listen on (default: 11211, 0 is off)\n" "-s <file> UNIX socket path to listen on (disables network support)\n" "-a <mask> access mask for UNIX socket, in octal (default: 0700)\n" "-l <ip_addr> interface to listen on (default: INADDR_ANY, all addresses)\n" "-d run as a daemon\n" "-r maximize core file limit\n" "-u <username> assume identity of <username> (only when run as root)\n" "-m <num> max memory to use for items in megabytes (default: 64 MB)\n" "-M return error on memory exhausted (rather than removing items)\n" "-c <num> max simultaneous connections (default: 1024)\n" "-k lock down all paged memory. Note that there is a\n" " limit on how much memory you may lock. Trying to\n" " allocate more than that would fail, so be sure you\n" " set the limit correctly for the user you started\n" " the daemon with (not for -u <username> user;\n" " under sh this is done with 'ulimit -S -l NUM_KB').\n" "-v verbose (print errors/warnings while in event loop)\n" "-vv very verbose (also print client commands/reponses)\n" "-vvv extremely verbose (also print internal state transitions)\n" "-h print this help and exit\n" "-i print memcached and libevent license\n" "-P <file> save PID in <file>, only used with -d option\n" "-f <factor> chunk size growth factor (default: 1.25)\n" "-n <bytes> minimum space allocated for key+value+flags (default: 48)\n"); printf("-L Try to use large memory pages (if available). Increasing\n" " the memory page size could reduce the number of TLB misses\n" " and improve the performance. 
In order to get large pages\n" " from the OS, memcached will allocate the total item-cache\n" " in one large chunk.\n"); printf("-D <char> Use <char> as the delimiter between key prefixes and IDs.\n" " This is used for per-prefix stats reporting. The default is\n" " \":\" (colon). If this option is specified, stats collection\n" " is turned on automatically; if not, then it may be turned on\n" " by sending the \"stats detail on\" command to the server.\n"); printf("-t <num> number of threads to use (default: 4)\n"); printf("-R Maximum number of requests per event, limits the number of\n" " requests process for a given connection to prevent \n" " starvation (default: 20)\n"); printf("-C Disable use of CAS\n"); printf("-b Set the backlog queue limit (default: 1024)\n"); printf("-B Binding protocol - one of ascii, binary, or auto (default)\n"); printf("-I Override the size of each slab page. Adjusts max item size\n" " (default: 1mb, min: 1k, max: 128m)\n"); #ifdef ENABLE_SASL printf("-S Turn on Sasl authentication\n"); #endif return; } static void usage_license(void) { printf(PACKAGE " " VERSION "\n\n"); printf( "Copyright (c) 2003, Danga Interactive, Inc. 
<http://www.danga.com/>\n" "All rights reserved.\n" "\n" "Redistribution and use in source and binary forms, with or without\n" "modification, are permitted provided that the following conditions are\n" "met:\n" "\n" " * Redistributions of source code must retain the above copyright\n" "notice, this list of conditions and the following disclaimer.\n" "\n" " * Redistributions in binary form must reproduce the above\n" "copyright notice, this list of conditions and the following disclaimer\n" "in the documentation and/or other materials provided with the\n" "distribution.\n" "\n" " * Neither the name of the Danga Interactive nor the names of its\n" "contributors may be used to endorse or promote products derived from\n" "this software without specific prior written permission.\n" "\n" "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n" "\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n" "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n" "A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n" "OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n" "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n" "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n" "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n" "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n" "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n" "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n" "\n" "\n" "This product includes software developed by Niels Provos.\n" "\n" "[ libevent ]\n" "\n" "Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>\n" "All rights reserved.\n" "\n" "Redistribution and use in source and binary forms, with or without\n" "modification, are permitted provided that the following conditions\n" "are met:\n" "1. 
Redistributions of source code must retain the above copyright\n" " notice, this list of conditions and the following disclaimer.\n" "2. Redistributions in binary form must reproduce the above copyright\n" " notice, this list of conditions and the following disclaimer in the\n" " documentation and/or other materials provided with the distribution.\n" "3. All advertising materials mentioning features or use of this software\n" " must display the following acknowledgement:\n" " This product includes software developed by Niels Provos.\n" "4. The name of the author may not be used to endorse or promote products\n" " derived from this software without specific prior written permission.\n" "\n" "THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n" "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES\n" "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n" "IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,\n" "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n" "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n" "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n" "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n" "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF\n" "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n" ); return; } static void save_pid(const pid_t pid, const char *pid_file) { FILE *fp; if (pid_file == NULL) return; if ((fp = fopen(pid_file, "w")) == NULL) { fprintf(stderr, "Could not open the pid file %s for writing\n", pid_file); return; } fprintf(fp,"%ld\n", (long)pid); if (fclose(fp) == -1) { fprintf(stderr, "Could not close the pid file %s.\n", pid_file); return; } } static void remove_pidfile(const char *pid_file) { if (pid_file == NULL) return; if (unlink(pid_file) != 0) { fprintf(stderr, "Could not remove the pid file %s.\n", pid_file); } } 
static void sig_handler(const int sig) { printf("SIGINT handled.\n"); exit(EXIT_SUCCESS); } #ifndef HAVE_SIGIGNORE static int sigignore(int sig) { struct sigaction sa = { .sa_handler = SIG_IGN, .sa_flags = 0 }; if (sigemptyset(&sa.sa_mask) == -1 || sigaction(sig, &sa, 0) == -1) { return -1; } return 0; } #endif /* * On systems that supports multiple page sizes we may reduce the * number of TLB-misses by using the biggest available page size */ static int enable_large_pages(void) { #if defined(HAVE_GETPAGESIZES) && defined(HAVE_MEMCNTL) int ret = -1; size_t sizes[32]; int avail = getpagesizes(sizes, 32); if (avail != -1) { size_t max = sizes[0]; struct memcntl_mha arg = {0}; int ii; for (ii = 1; ii < avail; ++ii) { if (max < sizes[ii]) { max = sizes[ii]; } } arg.mha_flags = 0; arg.mha_pagesize = max; arg.mha_cmd = MHA_MAPSIZE_BSSBRK; if (memcntl(0, 0, MC_HAT_ADVISE, (caddr_t)&arg, 0, 0) == -1) { fprintf(stderr, "Failed to set large pages: %s\n", strerror(errno)); fprintf(stderr, "Will use default page size\n"); } else { ret = 0; } } else { fprintf(stderr, "Failed to get supported pagesizes: %s\n", strerror(errno)); fprintf(stderr, "Will use default page size\n"); } return ret; #else return 0; #endif } int main (int argc, char **argv) { int c; bool lock_memory = false; bool do_daemonize = false; bool preallocate = false; int maxcore = 0; char *username = NULL; char *pid_file = NULL; struct passwd *pw; struct rlimit rlim; char unit = '\0'; int size_max = 0; /* listening sockets */ static int *l_socket = NULL; /* udp socket */ static int *u_socket = NULL; bool protocol_specified = false; /* handle SIGINT */ signal(SIGINT, sig_handler); /* init settings */ settings_init(); /* set stderr non-buffering (for running under, say, daemontools) */ setbuf(stderr, NULL); /* process arguments */ while (-1 != (c = getopt(argc, argv, "a:" /* access mask for unix socket */ "p:" /* TCP port number to listen on */ "s:" /* unix socket path to listen on */ "U:" /* UDP port number to 
listen on */ "m:" /* max memory to use for items in megabytes */ "M" /* return error on memory exhausted */ "c:" /* max simultaneous connections */ "k" /* lock down all paged memory */ "hi" /* help, licence info */ "r" /* maximize core file limit */ "v" /* verbose */ "d" /* daemon mode */ "l:" /* interface to listen on */ "u:" /* user identity to run as */ "P:" /* save PID in file */ "f:" /* factor? */ "n:" /* minimum space allocated for key+value+flags */ "t:" /* threads */ "D:" /* prefix delimiter? */ "L" /* Large memory pages */ "R:" /* max requests per event */ "C" /* Disable use of CAS */ "b:" /* backlog queue limit */ "B:" /* Binding protocol */ "I:" /* Max item size */ "S" /* Sasl ON */ ))) { switch (c) { case 'a': /* access for unix domain socket, as octal mask (like chmod)*/ settings.access= strtol(optarg,NULL,8); break; case 'U': settings.udpport = atoi(optarg); break; case 'p': settings.port = atoi(optarg); break; case 's': settings.socketpath = optarg; break; case 'm': settings.maxbytes = ((size_t)atoi(optarg)) * 1024 * 1024; break; case 'M': settings.evict_to_free = 0; break; case 'c': settings.maxconns = atoi(optarg); break; case 'h': usage(); exit(EXIT_SUCCESS); case 'i': usage_license(); exit(EXIT_SUCCESS); case 'k': lock_memory = true; break; case 'v': settings.verbose++; break; case 'l': settings.inter= strdup(optarg); break; case 'd': do_daemonize = true; break; case 'r': maxcore = 1; break; case 'R': settings.reqs_per_event = atoi(optarg); if (settings.reqs_per_event == 0) { fprintf(stderr, "Number of requests per event must be greater than 0\n"); return 1; } break; case 'u': username = optarg; break; case 'P': pid_file = optarg; break; case 'f': settings.factor = atof(optarg); if (settings.factor <= 1.0) { fprintf(stderr, "Factor must be greater than 1\n"); return 1; } break; case 'n': settings.chunk_size = atoi(optarg); if (settings.chunk_size == 0) { fprintf(stderr, "Chunk size must be greater than 0\n"); return 1; } break; case 't': 
settings.num_threads = atoi(optarg); if (settings.num_threads <= 0) { fprintf(stderr, "Number of threads must be greater than 0\n"); return 1; } /* There're other problems when you get above 64 threads. * In the future we should portably detect # of cores for the * default. */ if (settings.num_threads > 64) { fprintf(stderr, "WARNING: Setting a high number of worker" "threads is not recommended.\n" " Set this value to the number of cores in" " your machine or less.\n"); } break; case 'D': if (! optarg || ! optarg[0]) { fprintf(stderr, "No delimiter specified\n"); return 1; } settings.prefix_delimiter = optarg[0]; settings.detail_enabled = 1; break; case 'L' : if (enable_large_pages() == 0) { preallocate = true; } break; case 'C' : settings.use_cas = false; break; case 'b' : settings.backlog = atoi(optarg); break; case 'B': protocol_specified = true; if (strcmp(optarg, "auto") == 0) { settings.binding_protocol = negotiating_prot; } else if (strcmp(optarg, "binary") == 0) { settings.binding_protocol = binary_prot; } else if (strcmp(optarg, "ascii") == 0) { settings.binding_protocol = ascii_prot; } else { fprintf(stderr, "Invalid value for binding protocol: %s\n" " -- should be one of auto, binary, or ascii\n", optarg); exit(EX_USAGE); } break; case 'I': unit = optarg[strlen(optarg)-1]; if (unit == 'k' || unit == 'm' || unit == 'K' || unit == 'M') { optarg[strlen(optarg)-1] = '\0'; size_max = atoi(optarg); if (unit == 'k' || unit == 'K') size_max *= 1024; if (unit == 'm' || unit == 'M') size_max *= 1024 * 1024; settings.item_size_max = size_max; } else { settings.item_size_max = atoi(optarg); } if (settings.item_size_max < 1024) { fprintf(stderr, "Item max size cannot be less than 1024 bytes.\n"); return 1; } if (settings.item_size_max > 1024 * 1024 * 128) { fprintf(stderr, "Cannot set item size limit higher than 128 mb.\n"); return 1; } if (settings.item_size_max > 1024 * 1024) { fprintf(stderr, "WARNING: Setting item max size above 1MB is not" " recommended!\n" " 
Raising this limit increases the minimum memory requirements\n" " and will decrease your memory efficiency.\n" ); } break; case 'S': /* set Sasl authentication to true. Default is false */ #ifndef ENABLE_SASL fprintf(stderr, "This server is not built with SASL support.\n"); exit(EX_USAGE); #endif settings.sasl = true; break; default: fprintf(stderr, "Illegal argument \"%c\"\n", c); return 1; } } if (settings.sasl) { if (!protocol_specified) { settings.binding_protocol = binary_prot; } else { if (settings.binding_protocol != binary_prot) { fprintf(stderr, "WARNING: You shouldn't allow the ASCII protocol while using SASL\n"); exit(EX_USAGE); } } } if (maxcore != 0) { struct rlimit rlim_new; /* * First try raising to infinity; if that fails, try bringing * the soft limit to the hard. */ if (getrlimit(RLIMIT_CORE, &rlim) == 0) { rlim_new.rlim_cur = rlim_new.rlim_max = RLIM_INFINITY; if (setrlimit(RLIMIT_CORE, &rlim_new)!= 0) { /* failed. try raising just to the old max */ rlim_new.rlim_cur = rlim_new.rlim_max = rlim.rlim_max; (void)setrlimit(RLIMIT_CORE, &rlim_new); } } /* * getrlimit again to see what we ended up with. Only fail if * the soft limit ends up 0, because then no core files will be * created at all. */ if ((getrlimit(RLIMIT_CORE, &rlim) != 0) || rlim.rlim_cur == 0) { fprintf(stderr, "failed to ensure corefile creation\n"); exit(EX_OSERR); } } /* * If needed, increase rlimits to allow as many connections * as needed. */ if (getrlimit(RLIMIT_NOFILE, &rlim) != 0) { fprintf(stderr, "failed to getrlimit number of files\n"); exit(EX_OSERR); } else { int maxfiles = settings.maxconns; if (rlim.rlim_cur < maxfiles) rlim.rlim_cur = maxfiles; if (rlim.rlim_max < rlim.rlim_cur) rlim.rlim_max = rlim.rlim_cur; if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) { fprintf(stderr, "failed to set rlimit for open files. 
Try running as root or requesting smaller maxconns value.\n"); exit(EX_OSERR); } } /* lose root privileges if we have them */ if (getuid() == 0 || geteuid() == 0) { if (username == 0 || *username == '\0') { fprintf(stderr, "can't run as root without the -u switch\n"); exit(EX_USAGE); } if ((pw = getpwnam(username)) == 0) { fprintf(stderr, "can't find the user %s to switch to\n", username); exit(EX_NOUSER); } if (setgid(pw->pw_gid) < 0 || setuid(pw->pw_uid) < 0) { fprintf(stderr, "failed to assume identity of user %s\n", username); exit(EX_OSERR); } } /* Initialize Sasl if -S was specified */ if (settings.sasl) { init_sasl(); } /* daemonize if requested */ /* if we want to ensure our ability to dump core, don't chdir to / */ if (do_daemonize) { if (sigignore(SIGHUP) == -1) { perror("Failed to ignore SIGHUP"); } if (daemonize(maxcore, settings.verbose) == -1) { fprintf(stderr, "failed to daemon() in order to daemonize\n"); exit(EXIT_FAILURE); } } /* lock paged memory if needed */ if (lock_memory) { #ifdef HAVE_MLOCKALL int res = mlockall(MCL_CURRENT | MCL_FUTURE); if (res != 0) { fprintf(stderr, "warning: -k invalid, mlockall() failed: %s\n", strerror(errno)); } #else fprintf(stderr, "warning: -k invalid, mlockall() not supported on this platform. 
proceeding without.\n"); #endif } /* initialize main thread libevent instance */ main_base = event_init(); /* initialize other stuff */ stats_init(); assoc_init(); conn_init(); slabs_init(settings.maxbytes, settings.factor, preallocate); /* * ignore SIGPIPE signals; we can use errno == EPIPE if we * need that information */ if (sigignore(SIGPIPE) == -1) { perror("failed to ignore SIGPIPE; sigaction"); exit(EX_OSERR); } /* start up worker threads if MT mode */ thread_init(settings.num_threads, main_base); /* save the PID in if we're a daemon, do this after thread_init due to a file descriptor handling bug somewhere in libevent */ if (start_assoc_maintenance_thread() == -1) { exit(EXIT_FAILURE); } if (do_daemonize) save_pid(getpid(), pid_file); /* initialise clock event */ clock_handler(0, 0, 0); /* create unix mode sockets after dropping privileges */ if (settings.socketpath != NULL) { errno = 0; if (server_socket_unix(settings.socketpath,settings.access)) { vperror("failed to listen on UNIX socket: %s", settings.socketpath); exit(EX_OSERR); } } /* create the listening socket, bind it, and init */ if (settings.socketpath == NULL) { int udp_port; const char *portnumber_filename = getenv("MEMCACHED_PORT_FILENAME"); char temp_portnumber_filename[PATH_MAX]; FILE *portnumber_file = NULL; if (portnumber_filename != NULL) { snprintf(temp_portnumber_filename, sizeof(temp_portnumber_filename), "%s.lck", portnumber_filename); portnumber_file = fopen(temp_portnumber_filename, "a"); if (portnumber_file == NULL) { fprintf(stderr, "Failed to open \"%s\": %s\n", temp_portnumber_filename, strerror(errno)); } } errno = 0; if (settings.port && server_socket(settings.port, tcp_transport, portnumber_file)) { vperror("failed to listen on TCP port %d", settings.port); exit(EX_OSERR); } /* * initialization order: first create the listening sockets * (may need root on low ports), then drop root if needed, * then daemonise if needed, then init libevent (in some cases * descriptors created 
by libevent wouldn't survive forking). */ udp_port = settings.udpport ? settings.udpport : settings.port; /* create the UDP listening socket and bind it */ errno = 0; if (settings.udpport && server_socket(settings.udpport, udp_transport, portnumber_file)) { vperror("failed to listen on UDP port %d", settings.udpport); exit(EX_OSERR); } if (portnumber_file) { fclose(portnumber_file); rename(temp_portnumber_filename, portnumber_filename); } } /* Drop privileges no longer needed */ drop_privileges(); /* enter the event loop */ event_base_loop(main_base, 0); stop_assoc_maintenance_thread(); /* remove the PID file if we're a daemon */ if (do_daemonize) remove_pidfile(pid_file); /* Clean up strdup() call for bind() address */ if (settings.inter) free(settings.inter); if (l_socket) free(l_socket); if (u_socket) free(u_socket); return EXIT_SUCCESS; }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_4699_0
crossvul-cpp_data_bad_2589_0
/* * RTMP input format * Copyright (c) 2009 Konstantin Shishkov * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavcodec/bytestream.h" #include "libavutil/avstring.h" #include "libavutil/intfloat.h" #include "avformat.h" #include "rtmppkt.h" #include "flv.h" #include "url.h" void ff_amf_write_bool(uint8_t **dst, int val) { bytestream_put_byte(dst, AMF_DATA_TYPE_BOOL); bytestream_put_byte(dst, val); } void ff_amf_write_number(uint8_t **dst, double val) { bytestream_put_byte(dst, AMF_DATA_TYPE_NUMBER); bytestream_put_be64(dst, av_double2int(val)); } void ff_amf_write_string(uint8_t **dst, const char *str) { bytestream_put_byte(dst, AMF_DATA_TYPE_STRING); bytestream_put_be16(dst, strlen(str)); bytestream_put_buffer(dst, str, strlen(str)); } void ff_amf_write_string2(uint8_t **dst, const char *str1, const char *str2) { int len1 = 0, len2 = 0; if (str1) len1 = strlen(str1); if (str2) len2 = strlen(str2); bytestream_put_byte(dst, AMF_DATA_TYPE_STRING); bytestream_put_be16(dst, len1 + len2); bytestream_put_buffer(dst, str1, len1); bytestream_put_buffer(dst, str2, len2); } void ff_amf_write_null(uint8_t **dst) { bytestream_put_byte(dst, AMF_DATA_TYPE_NULL); } void ff_amf_write_object_start(uint8_t **dst) { bytestream_put_byte(dst, AMF_DATA_TYPE_OBJECT); } void 
ff_amf_write_field_name(uint8_t **dst, const char *str) { bytestream_put_be16(dst, strlen(str)); bytestream_put_buffer(dst, str, strlen(str)); } void ff_amf_write_object_end(uint8_t **dst) { /* first two bytes are field name length = 0, * AMF object should end with it and end marker */ bytestream_put_be24(dst, AMF_DATA_TYPE_OBJECT_END); } int ff_amf_read_bool(GetByteContext *bc, int *val) { if (bytestream2_get_byte(bc) != AMF_DATA_TYPE_BOOL) return AVERROR_INVALIDDATA; *val = bytestream2_get_byte(bc); return 0; } int ff_amf_read_number(GetByteContext *bc, double *val) { uint64_t read; if (bytestream2_get_byte(bc) != AMF_DATA_TYPE_NUMBER) return AVERROR_INVALIDDATA; read = bytestream2_get_be64(bc); *val = av_int2double(read); return 0; } int ff_amf_get_string(GetByteContext *bc, uint8_t *str, int strsize, int *length) { int stringlen = 0; int readsize; stringlen = bytestream2_get_be16(bc); if (stringlen + 1 > strsize) return AVERROR(EINVAL); readsize = bytestream2_get_buffer(bc, str, stringlen); if (readsize != stringlen) { av_log(NULL, AV_LOG_WARNING, "Unable to read as many bytes as AMF string signaled\n"); } str[readsize] = '\0'; *length = FFMIN(stringlen, readsize); return 0; } int ff_amf_read_string(GetByteContext *bc, uint8_t *str, int strsize, int *length) { if (bytestream2_get_byte(bc) != AMF_DATA_TYPE_STRING) return AVERROR_INVALIDDATA; return ff_amf_get_string(bc, str, strsize, length); } int ff_amf_read_null(GetByteContext *bc) { if (bytestream2_get_byte(bc) != AMF_DATA_TYPE_NULL) return AVERROR_INVALIDDATA; return 0; } int ff_rtmp_check_alloc_array(RTMPPacket **prev_pkt, int *nb_prev_pkt, int channel) { int nb_alloc; RTMPPacket *ptr; if (channel < *nb_prev_pkt) return 0; nb_alloc = channel + 16; // This can't use the av_reallocp family of functions, since we // would need to free each element in the array before the array // itself is freed. 
ptr = av_realloc_array(*prev_pkt, nb_alloc, sizeof(**prev_pkt)); if (!ptr) return AVERROR(ENOMEM); memset(ptr + *nb_prev_pkt, 0, (nb_alloc - *nb_prev_pkt) * sizeof(*ptr)); *prev_pkt = ptr; *nb_prev_pkt = nb_alloc; return 0; } int ff_rtmp_packet_read(URLContext *h, RTMPPacket *p, int chunk_size, RTMPPacket **prev_pkt, int *nb_prev_pkt) { uint8_t hdr; if (ffurl_read(h, &hdr, 1) != 1) return AVERROR(EIO); return ff_rtmp_packet_read_internal(h, p, chunk_size, prev_pkt, nb_prev_pkt, hdr); } static int rtmp_packet_read_one_chunk(URLContext *h, RTMPPacket *p, int chunk_size, RTMPPacket **prev_pkt_ptr, int *nb_prev_pkt, uint8_t hdr) { uint8_t buf[16]; int channel_id, timestamp, size; uint32_t ts_field; // non-extended timestamp or delta field uint32_t extra = 0; enum RTMPPacketType type; int written = 0; int ret, toread; RTMPPacket *prev_pkt; written++; channel_id = hdr & 0x3F; if (channel_id < 2) { //special case for channel number >= 64 buf[1] = 0; if (ffurl_read_complete(h, buf, channel_id + 1) != channel_id + 1) return AVERROR(EIO); written += channel_id + 1; channel_id = AV_RL16(buf) + 64; } if ((ret = ff_rtmp_check_alloc_array(prev_pkt_ptr, nb_prev_pkt, channel_id)) < 0) return ret; prev_pkt = *prev_pkt_ptr; size = prev_pkt[channel_id].size; type = prev_pkt[channel_id].type; extra = prev_pkt[channel_id].extra; hdr >>= 6; // header size indicator if (hdr == RTMP_PS_ONEBYTE) { ts_field = prev_pkt[channel_id].ts_field; } else { if (ffurl_read_complete(h, buf, 3) != 3) return AVERROR(EIO); written += 3; ts_field = AV_RB24(buf); if (hdr != RTMP_PS_FOURBYTES) { if (ffurl_read_complete(h, buf, 3) != 3) return AVERROR(EIO); written += 3; size = AV_RB24(buf); if (ffurl_read_complete(h, buf, 1) != 1) return AVERROR(EIO); written++; type = buf[0]; if (hdr == RTMP_PS_TWELVEBYTES) { if (ffurl_read_complete(h, buf, 4) != 4) return AVERROR(EIO); written += 4; extra = AV_RL32(buf); } } } if (ts_field == 0xFFFFFF) { if (ffurl_read_complete(h, buf, 4) != 4) return AVERROR(EIO); 
timestamp = AV_RB32(buf); } else { timestamp = ts_field; } if (hdr != RTMP_PS_TWELVEBYTES) timestamp += prev_pkt[channel_id].timestamp; if (prev_pkt[channel_id].read && size != prev_pkt[channel_id].size) { av_log(h, AV_LOG_ERROR, "RTMP packet size mismatch %d != %d\n", size, prev_pkt[channel_id].size); ff_rtmp_packet_destroy(&prev_pkt[channel_id]); prev_pkt[channel_id].read = 0; return AVERROR_INVALIDDATA; } if (!prev_pkt[channel_id].read) { if ((ret = ff_rtmp_packet_create(p, channel_id, type, timestamp, size)) < 0) return ret; p->read = written; p->offset = 0; prev_pkt[channel_id].ts_field = ts_field; prev_pkt[channel_id].timestamp = timestamp; } else { // previous packet in this channel hasn't completed reading RTMPPacket *prev = &prev_pkt[channel_id]; p->data = prev->data; p->size = prev->size; p->channel_id = prev->channel_id; p->type = prev->type; p->ts_field = prev->ts_field; p->extra = prev->extra; p->offset = prev->offset; p->read = prev->read + written; p->timestamp = prev->timestamp; prev->data = NULL; } p->extra = extra; // save history prev_pkt[channel_id].channel_id = channel_id; prev_pkt[channel_id].type = type; prev_pkt[channel_id].size = size; prev_pkt[channel_id].extra = extra; size = size - p->offset; toread = FFMIN(size, chunk_size); if (ffurl_read_complete(h, p->data + p->offset, toread) != toread) { ff_rtmp_packet_destroy(p); return AVERROR(EIO); } size -= toread; p->read += toread; p->offset += toread; if (size > 0) { RTMPPacket *prev = &prev_pkt[channel_id]; prev->data = p->data; prev->read = p->read; prev->offset = p->offset; p->data = NULL; return AVERROR(EAGAIN); } prev_pkt[channel_id].read = 0; // read complete; reset if needed return p->read; } int ff_rtmp_packet_read_internal(URLContext *h, RTMPPacket *p, int chunk_size, RTMPPacket **prev_pkt, int *nb_prev_pkt, uint8_t hdr) { while (1) { int ret = rtmp_packet_read_one_chunk(h, p, chunk_size, prev_pkt, nb_prev_pkt, hdr); if (ret > 0 || ret != AVERROR(EAGAIN)) return ret; if 
(ffurl_read(h, &hdr, 1) != 1) return AVERROR(EIO); } } int ff_rtmp_packet_write(URLContext *h, RTMPPacket *pkt, int chunk_size, RTMPPacket **prev_pkt_ptr, int *nb_prev_pkt) { uint8_t pkt_hdr[16], *p = pkt_hdr; int mode = RTMP_PS_TWELVEBYTES; int off = 0; int written = 0; int ret; RTMPPacket *prev_pkt; int use_delta; // flag if using timestamp delta, not RTMP_PS_TWELVEBYTES uint32_t timestamp; // full 32-bit timestamp or delta value if ((ret = ff_rtmp_check_alloc_array(prev_pkt_ptr, nb_prev_pkt, pkt->channel_id)) < 0) return ret; prev_pkt = *prev_pkt_ptr; //if channel_id = 0, this is first presentation of prev_pkt, send full hdr. use_delta = prev_pkt[pkt->channel_id].channel_id && pkt->extra == prev_pkt[pkt->channel_id].extra && pkt->timestamp >= prev_pkt[pkt->channel_id].timestamp; timestamp = pkt->timestamp; if (use_delta) { timestamp -= prev_pkt[pkt->channel_id].timestamp; } if (timestamp >= 0xFFFFFF) { pkt->ts_field = 0xFFFFFF; } else { pkt->ts_field = timestamp; } if (use_delta) { if (pkt->type == prev_pkt[pkt->channel_id].type && pkt->size == prev_pkt[pkt->channel_id].size) { mode = RTMP_PS_FOURBYTES; if (pkt->ts_field == prev_pkt[pkt->channel_id].ts_field) mode = RTMP_PS_ONEBYTE; } else { mode = RTMP_PS_EIGHTBYTES; } } if (pkt->channel_id < 64) { bytestream_put_byte(&p, pkt->channel_id | (mode << 6)); } else if (pkt->channel_id < 64 + 256) { bytestream_put_byte(&p, 0 | (mode << 6)); bytestream_put_byte(&p, pkt->channel_id - 64); } else { bytestream_put_byte(&p, 1 | (mode << 6)); bytestream_put_le16(&p, pkt->channel_id - 64); } if (mode != RTMP_PS_ONEBYTE) { bytestream_put_be24(&p, pkt->ts_field); if (mode != RTMP_PS_FOURBYTES) { bytestream_put_be24(&p, pkt->size); bytestream_put_byte(&p, pkt->type); if (mode == RTMP_PS_TWELVEBYTES) bytestream_put_le32(&p, pkt->extra); } } if (pkt->ts_field == 0xFFFFFF) bytestream_put_be32(&p, timestamp); // save history prev_pkt[pkt->channel_id].channel_id = pkt->channel_id; prev_pkt[pkt->channel_id].type = pkt->type; 
prev_pkt[pkt->channel_id].size = pkt->size; prev_pkt[pkt->channel_id].timestamp = pkt->timestamp; prev_pkt[pkt->channel_id].ts_field = pkt->ts_field; prev_pkt[pkt->channel_id].extra = pkt->extra; if ((ret = ffurl_write(h, pkt_hdr, p - pkt_hdr)) < 0) return ret; written = p - pkt_hdr + pkt->size; while (off < pkt->size) { int towrite = FFMIN(chunk_size, pkt->size - off); if ((ret = ffurl_write(h, pkt->data + off, towrite)) < 0) return ret; off += towrite; if (off < pkt->size) { uint8_t marker = 0xC0 | pkt->channel_id; if ((ret = ffurl_write(h, &marker, 1)) < 0) return ret; written++; if (pkt->ts_field == 0xFFFFFF) { uint8_t ts_header[4]; AV_WB32(ts_header, timestamp); if ((ret = ffurl_write(h, ts_header, 4)) < 0) return ret; written += 4; } } } return written; } int ff_rtmp_packet_create(RTMPPacket *pkt, int channel_id, RTMPPacketType type, int timestamp, int size) { if (size) { pkt->data = av_realloc(NULL, size); if (!pkt->data) return AVERROR(ENOMEM); } pkt->size = size; pkt->channel_id = channel_id; pkt->type = type; pkt->timestamp = timestamp; pkt->extra = 0; pkt->ts_field = 0; return 0; } void ff_rtmp_packet_destroy(RTMPPacket *pkt) { if (!pkt) return; av_freep(&pkt->data); pkt->size = 0; } static int amf_tag_skip(GetByteContext *gb) { AMFDataType type; unsigned nb = -1; int parse_key = 1; if (bytestream2_get_bytes_left(gb) < 1) return -1; type = bytestream2_get_byte(gb); switch (type) { case AMF_DATA_TYPE_NUMBER: bytestream2_get_be64(gb); return 0; case AMF_DATA_TYPE_BOOL: bytestream2_get_byte(gb); return 0; case AMF_DATA_TYPE_STRING: bytestream2_skip(gb, bytestream2_get_be16(gb)); return 0; case AMF_DATA_TYPE_LONG_STRING: bytestream2_skip(gb, bytestream2_get_be32(gb)); return 0; case AMF_DATA_TYPE_NULL: return 0; case AMF_DATA_TYPE_DATE: bytestream2_skip(gb, 10); return 0; case AMF_DATA_TYPE_ARRAY: parse_key = 0; case AMF_DATA_TYPE_MIXEDARRAY: nb = bytestream2_get_be32(gb); case AMF_DATA_TYPE_OBJECT: while (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY) { int t; if 
(parse_key) { int size = bytestream2_get_be16(gb); if (!size) { bytestream2_get_byte(gb); break; } if (size < 0 || size >= bytestream2_get_bytes_left(gb)) return -1; bytestream2_skip(gb, size); } t = amf_tag_skip(gb); if (t < 0 || bytestream2_get_bytes_left(gb) <= 0) return -1; } return 0; case AMF_DATA_TYPE_OBJECT_END: return 0; default: return -1; } } int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end) { GetByteContext gb; int ret; if (data >= data_end) return -1; bytestream2_init(&gb, data, data_end - data); ret = amf_tag_skip(&gb); if (ret < 0 || bytestream2_get_bytes_left(&gb) <= 0) return -1; av_assert0(bytestream2_tell(&gb) >= 0 && bytestream2_tell(&gb) <= data_end - data); return bytestream2_tell(&gb); } int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end, const uint8_t *name, uint8_t *dst, int dst_size) { int namelen = strlen(name); int len; while (*data != AMF_DATA_TYPE_OBJECT && data < data_end) { len = ff_amf_tag_size(data, data_end); if (len < 0) len = data_end - data; data += len; } if (data_end - data < 3) return -1; data++; for (;;) { int size = bytestream_get_be16(&data); if (!size) break; if (size < 0 || size >= data_end - data) return -1; data += size; if (size == namelen && !memcmp(data-size, name, namelen)) { switch (*data++) { case AMF_DATA_TYPE_NUMBER: snprintf(dst, dst_size, "%g", av_int2double(AV_RB64(data))); break; case AMF_DATA_TYPE_BOOL: snprintf(dst, dst_size, "%s", *data ? 
"true" : "false"); break; case AMF_DATA_TYPE_STRING: len = bytestream_get_be16(&data); av_strlcpy(dst, data, FFMIN(len+1, dst_size)); break; default: return -1; } return 0; } len = ff_amf_tag_size(data, data_end); if (len < 0 || len >= data_end - data) return -1; data += len; } return -1; } static const char* rtmp_packet_type(int type) { switch (type) { case RTMP_PT_CHUNK_SIZE: return "chunk size"; case RTMP_PT_BYTES_READ: return "bytes read"; case RTMP_PT_PING: return "ping"; case RTMP_PT_SERVER_BW: return "server bandwidth"; case RTMP_PT_CLIENT_BW: return "client bandwidth"; case RTMP_PT_AUDIO: return "audio packet"; case RTMP_PT_VIDEO: return "video packet"; case RTMP_PT_FLEX_STREAM: return "Flex shared stream"; case RTMP_PT_FLEX_OBJECT: return "Flex shared object"; case RTMP_PT_FLEX_MESSAGE: return "Flex shared message"; case RTMP_PT_NOTIFY: return "notification"; case RTMP_PT_SHARED_OBJ: return "shared object"; case RTMP_PT_INVOKE: return "invoke"; case RTMP_PT_METADATA: return "metadata"; default: return "unknown"; } } static void amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *data_end) { unsigned int size, nb = -1; char buf[1024]; AMFDataType type; int parse_key = 1; if (data >= data_end) return; switch ((type = *data++)) { case AMF_DATA_TYPE_NUMBER: av_log(ctx, AV_LOG_DEBUG, " number %g\n", av_int2double(AV_RB64(data))); return; case AMF_DATA_TYPE_BOOL: av_log(ctx, AV_LOG_DEBUG, " bool %d\n", *data); return; case AMF_DATA_TYPE_STRING: case AMF_DATA_TYPE_LONG_STRING: if (type == AMF_DATA_TYPE_STRING) { size = bytestream_get_be16(&data); } else { size = bytestream_get_be32(&data); } size = FFMIN(size, sizeof(buf) - 1); memcpy(buf, data, size); buf[size] = 0; av_log(ctx, AV_LOG_DEBUG, " string '%s'\n", buf); return; case AMF_DATA_TYPE_NULL: av_log(ctx, AV_LOG_DEBUG, " NULL\n"); return; case AMF_DATA_TYPE_ARRAY: parse_key = 0; case AMF_DATA_TYPE_MIXEDARRAY: nb = bytestream_get_be32(&data); case AMF_DATA_TYPE_OBJECT: av_log(ctx, AV_LOG_DEBUG, " 
{\n"); while (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY) { int t; if (parse_key) { size = bytestream_get_be16(&data); size = FFMIN(size, sizeof(buf) - 1); if (!size) { av_log(ctx, AV_LOG_DEBUG, " }\n"); data++; break; } memcpy(buf, data, size); buf[size] = 0; if (size >= data_end - data) return; data += size; av_log(ctx, AV_LOG_DEBUG, " %s: ", buf); } amf_tag_contents(ctx, data, data_end); t = ff_amf_tag_size(data, data_end); if (t < 0 || t >= data_end - data) return; data += t; } return; case AMF_DATA_TYPE_OBJECT_END: av_log(ctx, AV_LOG_DEBUG, " }\n"); return; default: return; } } void ff_rtmp_packet_dump(void *ctx, RTMPPacket *p) { av_log(ctx, AV_LOG_DEBUG, "RTMP packet type '%s'(%d) for channel %d, timestamp %d, extra field %d size %d\n", rtmp_packet_type(p->type), p->type, p->channel_id, p->timestamp, p->extra, p->size); if (p->type == RTMP_PT_INVOKE || p->type == RTMP_PT_NOTIFY) { uint8_t *src = p->data, *src_end = p->data + p->size; while (src < src_end) { int sz; amf_tag_contents(ctx, src, src_end); sz = ff_amf_tag_size(src, src_end); if (sz < 0) break; src += sz; } } else if (p->type == RTMP_PT_SERVER_BW){ av_log(ctx, AV_LOG_DEBUG, "Server BW = %d\n", AV_RB32(p->data)); } else if (p->type == RTMP_PT_CLIENT_BW){ av_log(ctx, AV_LOG_DEBUG, "Client BW = %d\n", AV_RB32(p->data)); } else if (p->type != RTMP_PT_AUDIO && p->type != RTMP_PT_VIDEO && p->type != RTMP_PT_METADATA) { int i; for (i = 0; i < p->size; i++) av_log(ctx, AV_LOG_DEBUG, " %02X", p->data[i]); av_log(ctx, AV_LOG_DEBUG, "\n"); } } int ff_amf_match_string(const uint8_t *data, int size, const char *str) { int len = strlen(str); int amf_len, type; if (size < 1) return 0; type = *data++; if (type != AMF_DATA_TYPE_LONG_STRING && type != AMF_DATA_TYPE_STRING) return 0; if (type == AMF_DATA_TYPE_LONG_STRING) { if ((size -= 4 + 1) < 0) return 0; amf_len = bytestream_get_be32(&data); } else { if ((size -= 2 + 1) < 0) return 0; amf_len = bytestream_get_be16(&data); } if (amf_len > size) return 0; if (amf_len 
!= len) return 0; return !memcmp(data, str, len); }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2589_0
crossvul-cpp_data_good_3547_3
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <net/arp.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/system.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/netfilter.h> #include <linux/init.h> #include <net/rose.h> #include <linux/seq_file.h> static unsigned int rose_neigh_no = 1; static struct rose_node *rose_node_list; static DEFINE_SPINLOCK(rose_node_list_lock); static struct rose_neigh *rose_neigh_list; static DEFINE_SPINLOCK(rose_neigh_list_lock); static struct rose_route *rose_route_list; static DEFINE_SPINLOCK(rose_route_list_lock); struct rose_neigh *rose_loopback_neigh; /* * Add a new route to a node, and in the process add the node and the * neighbour if it is new. 
 */
static int __must_check rose_add_node(struct rose_route_struct *rose_route,
	struct net_device *dev)
{
	struct rose_node *rose_node, *rose_tmpn, *rose_tmpp;
	struct rose_neigh *rose_neigh;
	int i, res = 0;

	spin_lock_bh(&rose_node_list_lock);
	spin_lock_bh(&rose_neigh_list_lock);

	/* Look for an existing node entry with the same masked address. */
	rose_node = rose_node_list;
	while (rose_node != NULL) {
		if ((rose_node->mask == rose_route->mask) &&
		    (rosecmpm(&rose_route->address, &rose_node->address,
			      rose_route->mask) == 0))
			break;
		rose_node = rose_node->next;
	}

	/* Routes may not be added to a loopback node. */
	if (rose_node != NULL && rose_node->loopback) {
		res = -EINVAL;
		goto out;
	}

	/* Look for an existing neighbour with the same callsign + device. */
	rose_neigh = rose_neigh_list;
	while (rose_neigh != NULL) {
		if (ax25cmp(&rose_route->neighbour,
			    &rose_neigh->callsign) == 0 &&
		    rose_neigh->dev == dev)
			break;
		rose_neigh = rose_neigh->next;
	}

	if (rose_neigh == NULL) {
		/* New neighbour: allocate, initialise, and link it onto the
		 * head of rose_neigh_list.
		 */
		rose_neigh = kmalloc(sizeof(*rose_neigh), GFP_ATOMIC);
		if (rose_neigh == NULL) {
			res = -ENOMEM;
			goto out;
		}

		rose_neigh->callsign  = rose_route->neighbour;
		rose_neigh->digipeat  = NULL;
		rose_neigh->ax25      = NULL;
		rose_neigh->dev       = dev;
		rose_neigh->count     = 0;
		rose_neigh->use       = 0;
		rose_neigh->dce_mode  = 0;
		rose_neigh->loopback  = 0;
		rose_neigh->number    = rose_neigh_no++;
		rose_neigh->restarted = 0;

		skb_queue_head_init(&rose_neigh->queue);

		init_timer(&rose_neigh->ftimer);
		init_timer(&rose_neigh->t0timer);

		if (rose_route->ndigis != 0) {
			/* Copy the digipeater path supplied by user space;
			 * the caller (rose_rt_ioctl) has already bounded
			 * ndigis by AX25_MAX_DIGIS.
			 */
			rose_neigh->digipeat =
				kmalloc(sizeof(ax25_digi), GFP_ATOMIC);
			if (rose_neigh->digipeat == NULL) {
				kfree(rose_neigh);
				res = -ENOMEM;
				goto out;
			}

			rose_neigh->digipeat->ndigi      = rose_route->ndigis;
			rose_neigh->digipeat->lastrepeat = -1;

			for (i = 0; i < rose_route->ndigis; i++) {
				rose_neigh->digipeat->calls[i] =
					rose_route->digipeaters[i];
				rose_neigh->digipeat->repeated[i] = 0;
			}
		}

		rose_neigh->next = rose_neigh_list;
		rose_neigh_list  = rose_neigh;
	}

	/*
	 * This is a new node to be inserted into the list. Find where it needs
	 * to be inserted into the list, and insert it. We want to be sure
	 * to order the list in descending order of mask size to ensure that
	 * later when we are searching this list the first match will be the
	 * best match.
	 */
	if (rose_node == NULL) {
		rose_tmpn = rose_node_list;
		rose_tmpp = NULL;

		while (rose_tmpn != NULL) {
			if (rose_tmpn->mask > rose_route->mask) {
				rose_tmpp = rose_tmpn;
				rose_tmpn = rose_tmpn->next;
			} else {
				break;
			}
		}

		/* create new node */
		rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC);
		if (rose_node == NULL) {
			res = -ENOMEM;
			goto out;
		}

		rose_node->address      = rose_route->address;
		rose_node->mask         = rose_route->mask;
		rose_node->count        = 1;
		rose_node->loopback     = 0;
		rose_node->neighbour[0] = rose_neigh;

		if (rose_tmpn == NULL) {
			if (rose_tmpp == NULL) {	/* Empty list */
				rose_node_list  = rose_node;
				rose_node->next = NULL;
			} else {
				rose_tmpp->next = rose_node;
				rose_node->next = NULL;
			}
		} else {
			if (rose_tmpp == NULL) {	/* 1st node */
				rose_node->next = rose_node_list;
				rose_node_list  = rose_node;
			} else {
				rose_tmpp->next = rose_node;
				rose_node->next = rose_tmpn;
			}
		}

		rose_neigh->count++;

		goto out;
	}

	/* We have space, slot it in */
	if (rose_node->count < 3) {
		rose_node->neighbour[rose_node->count] = rose_neigh;
		rose_node->count++;
		rose_neigh->count++;
	}

out:
	spin_unlock_bh(&rose_neigh_list_lock);
	spin_unlock_bh(&rose_node_list_lock);

	return res;
}

/*
 * Unlink a node from rose_node_list and free it.
 * Caller is holding rose_node_list_lock.
 */
static void rose_remove_node(struct rose_node *rose_node)
{
	struct rose_node *s;

	if ((s = rose_node_list) == rose_node) {
		rose_node_list = rose_node->next;
		kfree(rose_node);
		return;
	}

	while (s != NULL && s->next != NULL) {
		if (s->next == rose_node) {
			s->next = rose_node->next;
			kfree(rose_node);
			return;
		}

		s = s->next;
	}
}

/*
 * Caller is holding rose_neigh_list_lock.
*/ static void rose_remove_neigh(struct rose_neigh *rose_neigh) { struct rose_neigh *s; rose_stop_ftimer(rose_neigh); rose_stop_t0timer(rose_neigh); skb_queue_purge(&rose_neigh->queue); if ((s = rose_neigh_list) == rose_neigh) { rose_neigh_list = rose_neigh->next; if (rose_neigh->ax25) ax25_cb_put(rose_neigh->ax25); kfree(rose_neigh->digipeat); kfree(rose_neigh); return; } while (s != NULL && s->next != NULL) { if (s->next == rose_neigh) { s->next = rose_neigh->next; if (rose_neigh->ax25) ax25_cb_put(rose_neigh->ax25); kfree(rose_neigh->digipeat); kfree(rose_neigh); return; } s = s->next; } } /* * Caller is holding rose_route_list_lock. */ static void rose_remove_route(struct rose_route *rose_route) { struct rose_route *s; if (rose_route->neigh1 != NULL) rose_route->neigh1->use--; if (rose_route->neigh2 != NULL) rose_route->neigh2->use--; if ((s = rose_route_list) == rose_route) { rose_route_list = rose_route->next; kfree(rose_route); return; } while (s != NULL && s->next != NULL) { if (s->next == rose_route) { s->next = rose_route->next; kfree(rose_route); return; } s = s->next; } } /* * "Delete" a node. Strictly speaking remove a route to a node. The node * is only deleted if no routes are left to it. 
*/ static int rose_del_node(struct rose_route_struct *rose_route, struct net_device *dev) { struct rose_node *rose_node; struct rose_neigh *rose_neigh; int i, err = 0; spin_lock_bh(&rose_node_list_lock); spin_lock_bh(&rose_neigh_list_lock); rose_node = rose_node_list; while (rose_node != NULL) { if ((rose_node->mask == rose_route->mask) && (rosecmpm(&rose_route->address, &rose_node->address, rose_route->mask) == 0)) break; rose_node = rose_node->next; } if (rose_node == NULL || rose_node->loopback) { err = -EINVAL; goto out; } rose_neigh = rose_neigh_list; while (rose_neigh != NULL) { if (ax25cmp(&rose_route->neighbour, &rose_neigh->callsign) == 0 && rose_neigh->dev == dev) break; rose_neigh = rose_neigh->next; } if (rose_neigh == NULL) { err = -EINVAL; goto out; } for (i = 0; i < rose_node->count; i++) { if (rose_node->neighbour[i] == rose_neigh) { rose_neigh->count--; if (rose_neigh->count == 0 && rose_neigh->use == 0) rose_remove_neigh(rose_neigh); rose_node->count--; if (rose_node->count == 0) { rose_remove_node(rose_node); } else { switch (i) { case 0: rose_node->neighbour[0] = rose_node->neighbour[1]; case 1: rose_node->neighbour[1] = rose_node->neighbour[2]; case 2: break; } } goto out; } } err = -EINVAL; out: spin_unlock_bh(&rose_neigh_list_lock); spin_unlock_bh(&rose_node_list_lock); return err; } /* * Add the loopback neighbour. */ void rose_add_loopback_neigh(void) { struct rose_neigh *sn; rose_loopback_neigh = kmalloc(sizeof(struct rose_neigh), GFP_KERNEL); if (!rose_loopback_neigh) return; sn = rose_loopback_neigh; sn->callsign = null_ax25_address; sn->digipeat = NULL; sn->ax25 = NULL; sn->dev = NULL; sn->count = 0; sn->use = 0; sn->dce_mode = 1; sn->loopback = 1; sn->number = rose_neigh_no++; sn->restarted = 1; skb_queue_head_init(&sn->queue); init_timer(&sn->ftimer); init_timer(&sn->t0timer); spin_lock_bh(&rose_neigh_list_lock); sn->next = rose_neigh_list; rose_neigh_list = sn; spin_unlock_bh(&rose_neigh_list_lock); } /* * Add a loopback node. 
*/ int rose_add_loopback_node(rose_address *address) { struct rose_node *rose_node; int err = 0; spin_lock_bh(&rose_node_list_lock); rose_node = rose_node_list; while (rose_node != NULL) { if ((rose_node->mask == 10) && (rosecmpm(address, &rose_node->address, 10) == 0) && rose_node->loopback) break; rose_node = rose_node->next; } if (rose_node != NULL) goto out; if ((rose_node = kmalloc(sizeof(*rose_node), GFP_ATOMIC)) == NULL) { err = -ENOMEM; goto out; } rose_node->address = *address; rose_node->mask = 10; rose_node->count = 1; rose_node->loopback = 1; rose_node->neighbour[0] = rose_loopback_neigh; /* Insert at the head of list. Address is always mask=10 */ rose_node->next = rose_node_list; rose_node_list = rose_node; rose_loopback_neigh->count++; out: spin_unlock_bh(&rose_node_list_lock); return err; } /* * Delete a loopback node. */ void rose_del_loopback_node(rose_address *address) { struct rose_node *rose_node; spin_lock_bh(&rose_node_list_lock); rose_node = rose_node_list; while (rose_node != NULL) { if ((rose_node->mask == 10) && (rosecmpm(address, &rose_node->address, 10) == 0) && rose_node->loopback) break; rose_node = rose_node->next; } if (rose_node == NULL) goto out; rose_remove_node(rose_node); rose_loopback_neigh->count--; out: spin_unlock_bh(&rose_node_list_lock); } /* * A device has been removed. Remove its routes and neighbours. 
*/ void rose_rt_device_down(struct net_device *dev) { struct rose_neigh *s, *rose_neigh; struct rose_node *t, *rose_node; int i; spin_lock_bh(&rose_node_list_lock); spin_lock_bh(&rose_neigh_list_lock); rose_neigh = rose_neigh_list; while (rose_neigh != NULL) { s = rose_neigh; rose_neigh = rose_neigh->next; if (s->dev != dev) continue; rose_node = rose_node_list; while (rose_node != NULL) { t = rose_node; rose_node = rose_node->next; for (i = 0; i < t->count; i++) { if (t->neighbour[i] != s) continue; t->count--; switch (i) { case 0: t->neighbour[0] = t->neighbour[1]; case 1: t->neighbour[1] = t->neighbour[2]; case 2: break; } } if (t->count <= 0) rose_remove_node(t); } rose_remove_neigh(s); } spin_unlock_bh(&rose_neigh_list_lock); spin_unlock_bh(&rose_node_list_lock); } #if 0 /* Currently unused */ /* * A device has been removed. Remove its links. */ void rose_route_device_down(struct net_device *dev) { struct rose_route *s, *rose_route; spin_lock_bh(&rose_route_list_lock); rose_route = rose_route_list; while (rose_route != NULL) { s = rose_route; rose_route = rose_route->next; if (s->neigh1->dev == dev || s->neigh2->dev == dev) rose_remove_route(s); } spin_unlock_bh(&rose_route_list_lock); } #endif /* * Clear all nodes and neighbours out, except for neighbours with * active connections going through them. * Do not clear loopback neighbour and nodes. 
*/ static int rose_clear_routes(void) { struct rose_neigh *s, *rose_neigh; struct rose_node *t, *rose_node; spin_lock_bh(&rose_node_list_lock); spin_lock_bh(&rose_neigh_list_lock); rose_neigh = rose_neigh_list; rose_node = rose_node_list; while (rose_node != NULL) { t = rose_node; rose_node = rose_node->next; if (!t->loopback) rose_remove_node(t); } while (rose_neigh != NULL) { s = rose_neigh; rose_neigh = rose_neigh->next; if (s->use == 0 && !s->loopback) { s->count = 0; rose_remove_neigh(s); } } spin_unlock_bh(&rose_neigh_list_lock); spin_unlock_bh(&rose_node_list_lock); return 0; } /* * Check that the device given is a valid AX.25 interface that is "up". * called whith RTNL */ static struct net_device *rose_ax25_dev_find(char *devname) { struct net_device *dev; if ((dev = __dev_get_by_name(&init_net, devname)) == NULL) return NULL; if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25) return dev; return NULL; } /* * Find the first active ROSE device, usually "rose0". */ struct net_device *rose_dev_first(void) { struct net_device *dev, *first = NULL; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) first = dev; } rcu_read_unlock(); return first; } /* * Find the ROSE device for the given address. 
*/ struct net_device *rose_dev_get(rose_address *addr) { struct net_device *dev; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) { dev_hold(dev); goto out; } } dev = NULL; out: rcu_read_unlock(); return dev; } static int rose_dev_exists(rose_address *addr) { struct net_device *dev; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_ROSE && rosecmp(addr, (rose_address *)dev->dev_addr) == 0) goto out; } dev = NULL; out: rcu_read_unlock(); return dev != NULL; } struct rose_route *rose_route_free_lci(unsigned int lci, struct rose_neigh *neigh) { struct rose_route *rose_route; for (rose_route = rose_route_list; rose_route != NULL; rose_route = rose_route->next) if ((rose_route->neigh1 == neigh && rose_route->lci1 == lci) || (rose_route->neigh2 == neigh && rose_route->lci2 == lci)) return rose_route; return NULL; } /* * Find a neighbour or a route given a ROSE address. 
*/ struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause, unsigned char *diagnostic, int route_frame) { struct rose_neigh *res = NULL; struct rose_node *node; int failed = 0; int i; if (!route_frame) spin_lock_bh(&rose_node_list_lock); for (node = rose_node_list; node != NULL; node = node->next) { if (rosecmpm(addr, &node->address, node->mask) == 0) { for (i = 0; i < node->count; i++) { if (node->neighbour[i]->restarted) { res = node->neighbour[i]; goto out; } } } } if (!route_frame) { /* connect request */ for (node = rose_node_list; node != NULL; node = node->next) { if (rosecmpm(addr, &node->address, node->mask) == 0) { for (i = 0; i < node->count; i++) { if (!rose_ftimer_running(node->neighbour[i])) { res = node->neighbour[i]; failed = 0; goto out; } failed = 1; } } } } if (failed) { *cause = ROSE_OUT_OF_ORDER; *diagnostic = 0; } else { *cause = ROSE_NOT_OBTAINABLE; *diagnostic = 0; } out: if (!route_frame) spin_unlock_bh(&rose_node_list_lock); return res; } /* * Handle the ioctls that control the routing functions. 
*/ int rose_rt_ioctl(unsigned int cmd, void __user *arg) { struct rose_route_struct rose_route; struct net_device *dev; int err; switch (cmd) { case SIOCADDRT: if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct))) return -EFAULT; if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL) return -EINVAL; if (rose_dev_exists(&rose_route.address)) /* Can't add routes to ourself */ return -EINVAL; if (rose_route.mask > 10) /* Mask can't be more than 10 digits */ return -EINVAL; if (rose_route.ndigis > AX25_MAX_DIGIS) return -EINVAL; err = rose_add_node(&rose_route, dev); return err; case SIOCDELRT: if (copy_from_user(&rose_route, arg, sizeof(struct rose_route_struct))) return -EFAULT; if ((dev = rose_ax25_dev_find(rose_route.device)) == NULL) return -EINVAL; err = rose_del_node(&rose_route, dev); return err; case SIOCRSCLRRT: return rose_clear_routes(); default: return -EINVAL; } return 0; } static void rose_del_route_by_neigh(struct rose_neigh *rose_neigh) { struct rose_route *rose_route, *s; rose_neigh->restarted = 0; rose_stop_t0timer(rose_neigh); rose_start_ftimer(rose_neigh); skb_queue_purge(&rose_neigh->queue); spin_lock_bh(&rose_route_list_lock); rose_route = rose_route_list; while (rose_route != NULL) { if ((rose_route->neigh1 == rose_neigh && rose_route->neigh2 == rose_neigh) || (rose_route->neigh1 == rose_neigh && rose_route->neigh2 == NULL) || (rose_route->neigh2 == rose_neigh && rose_route->neigh1 == NULL)) { s = rose_route->next; rose_remove_route(rose_route); rose_route = s; continue; } if (rose_route->neigh1 == rose_neigh) { rose_route->neigh1->use--; rose_route->neigh1 = NULL; rose_transmit_clear_request(rose_route->neigh2, rose_route->lci2, ROSE_OUT_OF_ORDER, 0); } if (rose_route->neigh2 == rose_neigh) { rose_route->neigh2->use--; rose_route->neigh2 = NULL; rose_transmit_clear_request(rose_route->neigh1, rose_route->lci1, ROSE_OUT_OF_ORDER, 0); } rose_route = rose_route->next; } spin_unlock_bh(&rose_route_list_lock); } /* * A level 2 
link has timed out, therefore it appears to be a poor link, * then don't use that neighbour until it is reset. Blow away all through * routes and connections using this route. */ void rose_link_failed(ax25_cb *ax25, int reason) { struct rose_neigh *rose_neigh; spin_lock_bh(&rose_neigh_list_lock); rose_neigh = rose_neigh_list; while (rose_neigh != NULL) { if (rose_neigh->ax25 == ax25) break; rose_neigh = rose_neigh->next; } if (rose_neigh != NULL) { rose_neigh->ax25 = NULL; ax25_cb_put(ax25); rose_del_route_by_neigh(rose_neigh); rose_kill_by_neigh(rose_neigh); } spin_unlock_bh(&rose_neigh_list_lock); } /* * A device has been "downed" remove its link status. Blow away all * through routes and connections that use this device. */ void rose_link_device_down(struct net_device *dev) { struct rose_neigh *rose_neigh; for (rose_neigh = rose_neigh_list; rose_neigh != NULL; rose_neigh = rose_neigh->next) { if (rose_neigh->dev == dev) { rose_del_route_by_neigh(rose_neigh); rose_kill_by_neigh(rose_neigh); } } } /* * Route a frame to an appropriate AX.25 connection. 
 */
/*
 * Returns non-zero when the frame has been consumed (forwarded or
 * delivered to a local socket); 0 means the caller still owns the skb.
 */
int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
	struct rose_neigh *rose_neigh, *new_neigh;
	struct rose_route *rose_route;
	struct rose_facilities_struct facilities;
	rose_address *src_addr, *dest_addr;
	struct sock *sk;
	unsigned short frametype;
	unsigned int lci, new_lci;
	unsigned char cause, diagnostic;
	struct net_device *dev;
	int res = 0;
	char buf[11];

#if 0
	if (call_in_firewall(PF_ROSE, skb->dev, skb->data, NULL, &skb) != FW_ACCEPT)
		return res;
#endif

	if (skb->len < ROSE_MIN_LEN)
		return res;
	frametype = skb->data[2];
	/* LCI: low nibble of byte 0 (high part) plus byte 1 (low part). */
	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
	/* Untrusted frame: a CALL REQUEST must be long enough to carry the
	 * fixed-length address block before we read addresses out of it.
	 */
	if (frametype == ROSE_CALL_REQUEST &&
	    (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF ||
	     skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] !=
	     ROSE_CALL_REQ_ADDR_LEN_VAL))
		return res;
	src_addr  = (rose_address *)(skb->data + ROSE_CALL_REQ_SRC_ADDR_OFF);
	dest_addr = (rose_address *)(skb->data + ROSE_CALL_REQ_DEST_ADDR_OFF);

	spin_lock_bh(&rose_neigh_list_lock);
	spin_lock_bh(&rose_route_list_lock);

	/* Identify which neighbour this frame arrived from. */
	rose_neigh = rose_neigh_list;
	while (rose_neigh != NULL) {
		if (ax25cmp(&ax25->dest_addr, &rose_neigh->callsign) == 0 &&
		    ax25->ax25_dev->dev == rose_neigh->dev)
			break;
		rose_neigh = rose_neigh->next;
	}

	if (rose_neigh == NULL) {
		printk("rose_route : unknown neighbour or device %s\n",
		       ax2asc(buf, &ax25->dest_addr));
		goto out;
	}

	/*
	 * Obviously the link is working, halt the ftimer.
	 */
	rose_stop_ftimer(rose_neigh);

	/*
	 * LCI of zero is always for us, and its always a restart
	 * frame.
	 */
	if (lci == 0) {
		rose_link_rx_restart(skb, rose_neigh, frametype);
		goto out;
	}

	/*
	 * Find an existing socket.
	 */
	if ((sk = rose_find_socket(lci, rose_neigh)) != NULL) {
		if (frametype == ROSE_CALL_REQUEST) {
			struct rose_sock *rose = rose_sk(sk);

			/* Remove an existing unused socket */
			rose_clear_queues(sk);
			rose->cause	 = ROSE_NETWORK_CONGESTION;
			rose->diagnostic = 0;
			rose->neighbour->use--;
			rose->neighbour	 = NULL;
			rose->lci	 = 0;
			rose->state	 = ROSE_STATE_0;
			sk->sk_state	 = TCP_CLOSE;
			sk->sk_err	 = 0;
			sk->sk_shutdown	|= SEND_SHUTDOWN;
			if (!sock_flag(sk, SOCK_DEAD)) {
				sk->sk_state_change(sk);
				sock_set_flag(sk, SOCK_DEAD);
			}
		} else {
			skb_reset_transport_header(skb);
			res = rose_process_rx_frame(sk, skb);
			goto out;
		}
	}

	/*
	 * Is is a Call Request and is it for us ?
	 */
	if (frametype == ROSE_CALL_REQUEST)
		if ((dev = rose_dev_get(dest_addr)) != NULL) {
			res = rose_rx_call_request(skb, dev, rose_neigh, lci);
			dev_put(dev);
			goto out;
		}

	/* Not for us and through-routing is administratively disabled. */
	if (!sysctl_rose_routing_control) {
		rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 0);
		goto out;
	}

	/*
	 * Route it to the next in line if we have an entry for it.
	 */
	rose_route = rose_route_list;
	while (rose_route != NULL) {
		if (rose_route->lci1 == lci &&
		    rose_route->neigh1 == rose_neigh) {
			if (frametype == ROSE_CALL_REQUEST) {
				/* F6FBB - Remove an existing unused route */
				rose_remove_route(rose_route);
				break;
			} else if (rose_route->neigh2 != NULL) {
				/* Rewrite the LCI in the header and forward
				 * on the other leg of the route.
				 */
				skb->data[0] &= 0xF0;
				skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
				skb->data[1]  = (rose_route->lci2 >> 0) & 0xFF;
				rose_transmit_link(skb, rose_route->neigh2);
				if (frametype == ROSE_CLEAR_CONFIRMATION)
					rose_remove_route(rose_route);
				res = 1;
				goto out;
			} else {
				if (frametype == ROSE_CLEAR_CONFIRMATION)
					rose_remove_route(rose_route);
				goto out;
			}
		}
		if (rose_route->lci2 == lci &&
		    rose_route->neigh2 == rose_neigh) {
			if (frametype == ROSE_CALL_REQUEST) {
				/* F6FBB - Remove an existing unused route */
				rose_remove_route(rose_route);
				break;
			} else if (rose_route->neigh1 != NULL) {
				skb->data[0] &= 0xF0;
				skb->data[0] |= (rose_route->lci1 >> 8) & 0x0F;
				skb->data[1]  = (rose_route->lci1 >> 0) & 0xFF;
				rose_transmit_link(skb, rose_route->neigh1);
				if (frametype == ROSE_CLEAR_CONFIRMATION)
					rose_remove_route(rose_route);
				res = 1;
				goto out;
			} else {
				if (frametype == ROSE_CLEAR_CONFIRMATION)
					rose_remove_route(rose_route);
				goto out;
			}
		}
		rose_route = rose_route->next;
	}

	/*
	 * We know that:
	 *	1. The frame isn't for us,
	 *	2. It isn't "owned" by any existing route.
	 */
	if (frametype != ROSE_CALL_REQUEST) {	/* XXX */
		res = 0;
		goto out;
	}

	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));

	/* Parse the (length-bounded) facilities field of the CALL REQUEST. */
	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
				   &facilities)) {
		rose_transmit_clear_request(rose_neigh, lci, ROSE_INVALID_FACILITY, 76);
		goto out;
	}

	/*
	 * Check for routing loops.
	 */
	rose_route = rose_route_list;
	while (rose_route != NULL) {
		if (rose_route->rand == facilities.rand &&
		    rosecmp(src_addr, &rose_route->src_addr) == 0 &&
		    ax25cmp(&facilities.dest_call, &rose_route->src_call) == 0 &&
		    ax25cmp(&facilities.source_call, &rose_route->dest_call) == 0) {
			rose_transmit_clear_request(rose_neigh, lci, ROSE_NOT_OBTAINABLE, 120);
			goto out;
		}
		rose_route = rose_route->next;
	}

	/* Pick an outgoing neighbour and a fresh LCI for the new leg. */
	if ((new_neigh = rose_get_neigh(dest_addr, &cause, &diagnostic, 1)) == NULL) {
		rose_transmit_clear_request(rose_neigh, lci, cause, diagnostic);
		goto out;
	}

	if ((new_lci = rose_new_lci(new_neigh)) == 0) {
		rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 71);
		goto out;
	}

	if ((rose_route = kmalloc(sizeof(*rose_route), GFP_ATOMIC)) == NULL) {
		rose_transmit_clear_request(rose_neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		goto out;
	}

	rose_route->lci1      = lci;
	rose_route->src_addr  = *src_addr;
	rose_route->dest_addr = *dest_addr;
	rose_route->src_call  = facilities.dest_call;
	rose_route->dest_call = facilities.source_call;
	rose_route->rand      = facilities.rand;
	rose_route->neigh1    = rose_neigh;
	rose_route->lci2      = new_lci;
	rose_route->neigh2    = new_neigh;

	rose_route->neigh1->use++;
	rose_route->neigh2->use++;

	rose_route->next = rose_route_list;
	rose_route_list  = rose_route;

	skb->data[0] &= 0xF0;
	skb->data[0] |= (rose_route->lci2 >> 8) & 0x0F;
	skb->data[1]  = (rose_route->lci2 >> 0) & 0xFF;

	rose_transmit_link(skb, rose_route->neigh2);
	res = 1;

out:
	spin_unlock_bh(&rose_route_list_lock);
	spin_unlock_bh(&rose_neigh_list_lock);

	return res;
}

#ifdef CONFIG_PROC_FS

/* seq_file iterator over rose_node_list for /proc; the list lock is held
 * from start to stop.
 */
static void *rose_node_start(struct seq_file *seq, loff_t *pos)
	__acquires(rose_node_list_lock)
{
	struct rose_node *rose_node;
	int i = 1;

	spin_lock_bh(&rose_node_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (rose_node = rose_node_list; rose_node && i < *pos;
	     rose_node = rose_node->next, ++i);

	return (i == *pos) ? rose_node : NULL;
}

static void *rose_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ? rose_node_list
		: ((struct rose_node *)v)->next;
}

static void rose_node_stop(struct seq_file *seq, void *v)
	__releases(rose_node_list_lock)
{
	spin_unlock_bh(&rose_node_list_lock);
}

static int rose_node_show(struct seq_file *seq, void *v)
{
	char rsbuf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "address mask n neigh neigh neigh\n");
	else {
		const struct rose_node *rose_node = v;
		/* if (rose_node->loopback) {
			seq_printf(seq, "%-10s %04d 1 loopback\n",
				   rose2asc(rsbuf, &rose_node->address),
				   rose_node->mask);
		} else { */
			seq_printf(seq, "%-10s %04d %d",
				   rose2asc(rsbuf, &rose_node->address),
				   rose_node->mask,
				   rose_node->count);

			for (i = 0; i < rose_node->count; i++)
				seq_printf(seq, " %05d",
					   rose_node->neighbour[i]->number);

			seq_puts(seq, "\n");
		/* } */
	}
	return 0;
}

static const struct seq_operations rose_node_seqops = {
	.start = rose_node_start,
	.next = rose_node_next,
	.stop = rose_node_stop,
	.show = rose_node_show,
};

static int rose_nodes_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_node_seqops);
}

const struct file_operations rose_nodes_fops = {
	.owner = THIS_MODULE,
	.open = rose_nodes_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* seq_file iterator over rose_neigh_list; lock held start..stop. */
static void *rose_neigh_start(struct seq_file *seq, loff_t *pos)
	__acquires(rose_neigh_list_lock)
{
	struct rose_neigh *rose_neigh;
	int i = 1;

	spin_lock_bh(&rose_neigh_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (rose_neigh = rose_neigh_list; rose_neigh && i < *pos;
	     rose_neigh = rose_neigh->next, ++i);

	return (i == *pos) ? rose_neigh : NULL;
}

static void *rose_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ? rose_neigh_list
		: ((struct rose_neigh *)v)->next;
}

static void rose_neigh_stop(struct seq_file *seq, void *v)
	__releases(rose_neigh_list_lock)
{
	spin_unlock_bh(&rose_neigh_list_lock);
}

static int rose_neigh_show(struct seq_file *seq, void *v)
{
	char buf[11];
	int i;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "addr callsign dev count use mode restart t0 tf digipeaters\n");
	else {
		struct rose_neigh *rose_neigh = v;

		/* if (!rose_neigh->loopback) { */
		seq_printf(seq, "%05d %-9s %-4s %3d %3d %3s %3s %3lu %3lu",
			   rose_neigh->number,
			   (rose_neigh->loopback) ? "RSLOOP-0" : ax2asc(buf, &rose_neigh->callsign),
			   rose_neigh->dev ? rose_neigh->dev->name : "???",
			   rose_neigh->count,
			   rose_neigh->use,
			   (rose_neigh->dce_mode) ? "DCE" : "DTE",
			   (rose_neigh->restarted) ? "yes" : "no",
			   ax25_display_timer(&rose_neigh->t0timer) / HZ,
			   ax25_display_timer(&rose_neigh->ftimer)  / HZ);

		if (rose_neigh->digipeat != NULL) {
			for (i = 0; i < rose_neigh->digipeat->ndigi; i++)
				seq_printf(seq, " %s", ax2asc(buf, &rose_neigh->digipeat->calls[i]));
		}

		seq_puts(seq, "\n");
	}
	return 0;
}

static const struct seq_operations rose_neigh_seqops = {
	.start = rose_neigh_start,
	.next = rose_neigh_next,
	.stop = rose_neigh_stop,
	.show = rose_neigh_show,
};

static int rose_neigh_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_neigh_seqops);
}

const struct file_operations rose_neigh_fops = {
	.owner = THIS_MODULE,
	.open = rose_neigh_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* seq_file iterator over rose_route_list; lock held start..stop. */
static void *rose_route_start(struct seq_file *seq, loff_t *pos)
	__acquires(rose_route_list_lock)
{
	struct rose_route *rose_route;
	int i = 1;

	spin_lock_bh(&rose_route_list_lock);
	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (rose_route = rose_route_list; rose_route && i < *pos;
	     rose_route = rose_route->next, ++i);

	return (i == *pos) ? rose_route : NULL;
}

static void *rose_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	return (v == SEQ_START_TOKEN) ? rose_route_list
		: ((struct rose_route *)v)->next;
}

static void rose_route_stop(struct seq_file *seq, void *v)
	__releases(rose_route_list_lock)
{
	spin_unlock_bh(&rose_route_list_lock);
}

static int rose_route_show(struct seq_file *seq, void *v)
{
	char buf[11], rsbuf[11];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "lci address callsign neigh <-> lci address callsign neigh\n");
	else {
		struct rose_route *rose_route = v;

		if (rose_route->neigh1)
			seq_printf(seq, "%3.3X %-10s %-9s %05d ",
				   rose_route->lci1,
				   rose2asc(rsbuf, &rose_route->src_addr),
				   ax2asc(buf, &rose_route->src_call),
				   rose_route->neigh1->number);
		else
			seq_puts(seq, "000 * * 00000 ");

		if (rose_route->neigh2)
			seq_printf(seq, "%3.3X %-10s %-9s %05d\n",
				   rose_route->lci2,
				   rose2asc(rsbuf, &rose_route->dest_addr),
				   ax2asc(buf, &rose_route->dest_call),
				   rose_route->neigh2->number);
		else
			seq_puts(seq, "000 * * 00000\n");
	}
	return 0;
}

static const struct seq_operations rose_route_seqops = {
	.start = rose_route_start,
	.next = rose_route_next,
	.stop = rose_route_stop,
	.show = rose_route_show,
};

static int rose_route_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_route_seqops);
}

const struct file_operations rose_routes_fops = {
	.owner = THIS_MODULE,
	.open = rose_route_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */

/*
 * Release all memory associated with ROSE routing structures.
 */
void __exit rose_rt_free(void)
{
	struct rose_neigh *s, *rose_neigh = rose_neigh_list;
	struct rose_node  *t, *rose_node  = rose_node_list;
	struct rose_route *u, *rose_route = rose_route_list;

	while (rose_neigh != NULL) {
		s          = rose_neigh;
		rose_neigh = rose_neigh->next;

		rose_remove_neigh(s);
	}

	while (rose_node != NULL) {
		t         = rose_node;
		rose_node = rose_node->next;

		rose_remove_node(t);
	}

	while (rose_route != NULL) {
		u          = rose_route;
		rose_route = rose_route->next;

		rose_remove_route(u);
	}
}
./CrossVul/dataset_final_sorted/CWE-20/c/good_3547_3
crossvul-cpp_data_bad_5845_12
/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:	Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/caif/caif_socket.h>
#include <linux/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_CAIF);

/*
 * CAIF state is re-using the TCP socket states.
 * caif_states stored in sk_state reflect the state as reported by
 * the CAIF stack, while sk_socket->state is the state of the socket.
 */
enum caif_states {
	CAIF_CONNECTED		= TCP_ESTABLISHED,
	CAIF_CONNECTING		= TCP_SYN_SENT,
	CAIF_DISCONNECTED	= TCP_CLOSE
};

/* Bit positions inside caifsock.flow_state (manipulated with the atomic
 * bitops below).
 */
#define TX_FLOW_ON_BIT	1
#define RX_FLOW_ON_BIT	2

struct caifsock {
	struct sock sk; /* must be first member */
	struct cflayer layer;		/* CAIF stack layer for this socket */
	u32 flow_state;			/* TX/RX flow-on bits */
	struct caif_connect_request conn_req;
	struct mutex readlock;		/* serialises stream receive */
	struct dentry *debugfs_socket_dir;
	int headroom, tailroom, maxframe;
};

static int rx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static int tx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void set_rx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void set_rx_flow_on(struct caifsock *cf_sk)
{
	set_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void set_tx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void set_tx_flow_on(struct caifsock *cf_sk)
{
	set_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}

static void caif_read_lock(struct sock *sk)
{
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
	mutex_lock(&cf_sk->readlock);
}

static void caif_read_unlock(struct sock *sk)
{
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
	mutex_unlock(&cf_sk->readlock);
}

static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
	/* A quarter of full buffer is used a low water mark */
	return cf_sk->sk.sk_rcvbuf / 4;
}

/* Send a flow-control modem command down the CAIF stack, if the lower
 * layer supports it.
 */
static void caif_flow_ctrl(struct sock *sk, int mode)
{
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
	if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
		cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}

/*
 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
 * not dropped, but CAIF is sending flow off instead.
 */
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	/* Receive buffer full: ask the modem to stop sending rather than
	 * dropping the packet.
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
		net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
				    atomic_read(&cf_sk->sk.sk_rmem_alloc),
				    sk_rcvbuf_lowwater(cf_sk));
		set_rx_flow_off(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}

	err = sk_filter(sk, skb);
	/* NOTE(review): on sk_filter() failure the skb is not freed here and
	 * the only caller (caif_sktrecv_cb) ignores this return value — this
	 * looks like a potential skb leak; confirm against upstream.
	 */
	if (err)
		return err;
	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
		set_rx_flow_off(cf_sk);
		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;
	spin_lock_irqsave(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	else
		kfree_skb(skb);
	return 0;
}

/* Packet Receive Callback function called from CAIF Stack */
static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
{
	struct caifsock *cf_sk;
	struct sk_buff *skb;

	cf_sk = container_of(layr, struct caifsock, layer);
	skb = cfpkt_tonative(pkt);

	/* Drop anything that arrives before the socket is connected. */
	if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
		kfree_skb(skb);
		return 0;
	}
	caif_queue_rcv_skb(&cf_sk->sk, skb);
	return 0;
}

/* Reference-count callbacks registered with the CAIF stack: pin the
 * socket for as long as the stack holds the layer.
 */
static void cfsk_hold(struct cflayer *layr)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	sock_hold(&cf_sk->sk);
}

static void cfsk_put(struct cflayer *layr)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	sock_put(&cf_sk->sk);
}

/* Packet Control Callback function called from CAIF */
static void caif_ctrl_cb(struct cflayer *layr,
			 enum caif_ctrlcmd flow,
			 int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* OK from modem to start sending again */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to shut up */
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_RSP:
		/* We're now connected */
		caif_client_register_refcnt(&cf_sk->layer,
					    cfsk_hold, cfsk_put);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_shutdown = 0;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_DEINIT_RSP:
		/* We're now disconnected */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect request failed */
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		/*
		 * Socket "standards" seems to require POLLOUT to
		 * be set at connect failure.
		 */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		/* Modem has closed this connection, or device is down. */
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		cf_sk->sk.sk_err = ECONNRESET;
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;

	default:
		pr_debug("Unexpected flow command %d\n", flow);
	}
}

/* If receive flow was turned off and the queue has drained below the
 * low-water mark, re-enable it and tell the modem to resume sending.
 */
static void caif_check_flow_release(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (rx_flow_is_on(cf_sk))
		return;

	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
		set_rx_flow_on(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
	}
}

/*
 * Copied from unix_dgram_recvmsg, but removed credit checks,
 * changed locking, address handling and added MSG_TRUNC.
 */
static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *m, size_t len, int flags)

{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int ret;
	int copylen;

	ret = -EOPNOTSUPP;
	if (m->msg_flags&MSG_OOB)
		goto read_error;

	/* No address is returned for CAIF sockets. */
	m->msg_namelen = 0;

	skb = skb_recv_datagram(sk, flags, 0, &ret);
	if (!skb)
		goto read_error;
	copylen = skb->len;
	/* Datagram larger than the caller's buffer: truncate and flag it. */
	if (len < copylen) {
		m->msg_flags |= MSG_TRUNC;
		copylen = len;
	}

	ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen);
	if (ret)
		goto out_free;

	ret = (flags & MSG_TRUNC) ? skb->len : copylen;
out_free:
	skb_free_datagram(sk, skb);
	caif_check_flow_release(sk);
	return ret;

read_error:
	return ret;
}

/* Copied from unix_stream_wait_data, identical except for lock call.
*/ static long caif_stream_data_wait(struct sock *sk, long timeo) { DEFINE_WAIT(wait); lock_sock(sk); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (!skb_queue_empty(&sk->sk_receive_queue) || sk->sk_err || sk->sk_state != CAIF_CONNECTED || sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN) || signal_pending(current) || !timeo) break; set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); } finish_wait(sk_sleep(sk), &wait); release_sock(sk); return timeo; } /* * Copied from unix_stream_recvmsg, but removed credit checks, * changed locking calls, changed address handling. */ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; int copied = 0; int target; int err = 0; long timeo; err = -EOPNOTSUPP; if (flags&MSG_OOB) goto out; msg->msg_namelen = 0; /* * Lock the socket to prevent queue disordering * while sleeps in memcpy_tomsg */ err = -EAGAIN; if (sk->sk_state == CAIF_CONNECTING) goto out; caif_read_lock(sk); target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT); do { int chunk; struct sk_buff *skb; lock_sock(sk); skb = skb_dequeue(&sk->sk_receive_queue); caif_check_flow_release(sk); if (skb == NULL) { if (copied >= target) goto unlock; /* * POSIX 1003.1g mandates this order. 
*/ err = sock_error(sk); if (err) goto unlock; err = -ECONNRESET; if (sk->sk_shutdown & RCV_SHUTDOWN) goto unlock; err = -EPIPE; if (sk->sk_state != CAIF_CONNECTED) goto unlock; if (sock_flag(sk, SOCK_DEAD)) goto unlock; release_sock(sk); err = -EAGAIN; if (!timeo) break; caif_read_unlock(sk); timeo = caif_stream_data_wait(sk, timeo); if (signal_pending(current)) { err = sock_intr_errno(timeo); goto out; } caif_read_lock(sk); continue; unlock: release_sock(sk); break; } release_sock(sk); chunk = min_t(unsigned int, skb->len, size); if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (copied == 0) copied = -EFAULT; break; } copied += chunk; size -= chunk; /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { skb_pull(skb, chunk); /* put the skb back if we didn't use it up. */ if (skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); break; } kfree_skb(skb); } else { /* * It is questionable, see note in unix_dgram_recvmsg. */ /* put message back and return */ skb_queue_head(&sk->sk_receive_queue, skb); break; } } while (size); caif_read_unlock(sk); out: return copied ? : err; } /* * Copied from sock.c:sock_wait_for_wmem, but change to wait for * CAIF flow-on and sock_writable. */ static long caif_wait_for_flow_on(struct caifsock *cf_sk, int wait_writeable, long timeo, int *err) { struct sock *sk = &cf_sk->sk; DEFINE_WAIT(wait); for (;;) { *err = 0; if (tx_flow_is_on(cf_sk) && (!wait_writeable || sock_writeable(&cf_sk->sk))) break; *err = -ETIMEDOUT; if (!timeo) break; *err = -ERESTARTSYS; if (signal_pending(current)) break; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); *err = -ECONNRESET; if (sk->sk_shutdown & SHUTDOWN_MASK) break; *err = -sk->sk_err; if (sk->sk_err) break; *err = -EPIPE; if (cf_sk->sk.sk_state != CAIF_CONNECTED) break; timeo = schedule_timeout(timeo); } finish_wait(sk_sleep(sk), &wait); return timeo; } /* * Transmit a SKB. 
   The device may temporarily request re-transmission
 * by returning EAGAIN.
 */
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo)
{
	struct cfpkt *pkt;

	/* Wrap the skb as a CAIF packet and clear its control block
	 * before handing it down the layer stack.
	 * (noblock and timeo are accepted but unused in this body.) */
	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
	memset(skb->cb, 0, sizeof(struct caif_payload_info));
	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);

	if (cf_sk->layer.dn == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}

/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int buffer_size;
	int ret = 0;
	struct sk_buff *skb = NULL;
	int noblock;
	long timeo;
	caif_assert(cf_sk);
	ret = sock_error(sk);
	if (ret)
		goto err;

	ret = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto err;

	/* Destination addressing is not supported: the socket is bound
	 * to its peer at connect time. */
	ret = -EOPNOTSUPP;
	if (msg->msg_namelen)
		goto err;

	ret = -EINVAL;
	if (unlikely(msg->msg_iov->iov_base == NULL))
		goto err;
	noblock = msg->msg_flags & MSG_DONTWAIT;

	timeo = sock_sndtimeo(sk, noblock);
	/* Block (up to timeo) until CAIF TX flow control allows sending. */
	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
				1, timeo, &ret);

	if (ret)
		goto err;
	ret = -EPIPE;
	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
		sock_flag(sk, SOCK_DEAD) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		goto err;

	/* Error if trying to write more than maximum frame size.
	 */
	ret = -EMSGSIZE;
	/* RFM is exempt from the maxframe limit. */
	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
		goto err;

	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;

	ret = -ENOMEM;
	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);

	if (!skb || skb_tailroom(skb) < buffer_size)
		goto err;

	skb_reserve(skb, cf_sk->headroom);

	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

	if (ret)
		goto err;
	ret = transmit_skb(skb, cf_sk, noblock, timeo);
	if (ret < 0)
		/* skb is already freed */
		return ret;

	return len;
err:
	kfree_skb(skb);
	return ret;
}

/*
 * Copied from unix_stream_sendmsg and adapted to CAIF:
 * Changed removed permission handling and added waiting for flow on
 * and other minor adaptations.
 */
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
				struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	long timeo;

	err = -EOPNOTSUPP;
	if (unlikely(msg->msg_flags&MSG_OOB))
		goto out_err;

	if (unlikely(msg->msg_namelen))
		goto out_err;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);

	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
		goto pipe_err;

	/* Chop the stream into frames bounded by maxframe, half the
	 * send buffer, and SKB_MAX_ALLOC. */
	while (sent < len) {

		size = len-sent;

		if (size > cf_sk->maxframe)
			size = cf_sk->maxframe;

		/* If size is more than half of sndbuf, chop up message */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;

		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;

		skb = sock_alloc_send_skb(sk,
					size + cf_sk->headroom +
					cf_sk->tailroom,
					msg->msg_flags&MSG_DONTWAIT,
					&err);
		if (skb == NULL)
			goto out_err;

		skb_reserve(skb, cf_sk->headroom);
		/*
		 *	If you pass two values to the sock_alloc_send_skb
		 *	it tries to grab the large buffer with GFP_NOFS
		 *	(which can fail easily), and if it fails grab the
		 *	fallback size buffer which is under a page and will
		 *	succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));

		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}
		err = transmit_skb(skb, cf_sk,
				msg->msg_flags&MSG_DONTWAIT, timeo);
		if (err < 0)
			/* skb is already freed */
			goto pipe_err;

		sent += size;
	}

	return sent;

pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	return sent ? : err;
}

/* Socket options are only changeable while the socket is still
 * unconnected; both options live under SOL_CAIF. */
static int setsockopt(struct socket *sock,
			int lvl, int opt, char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int linksel;

	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
		return -ENOPROTOOPT;

	switch (opt) {
	case CAIFSO_LINK_SELECT:
		if (ol < sizeof(int))
			return -EINVAL;
		if (lvl != SOL_CAIF)
			goto bad_sol;
		if (copy_from_user(&linksel, ov, sizeof(int)))
			return -EINVAL;
		lock_sock(&(cf_sk->sk));
		cf_sk->conn_req.link_selector = linksel;
		release_sock(&cf_sk->sk);
		return 0;

	case CAIFSO_REQ_PARAM:
		if (lvl != SOL_CAIF)
			goto bad_sol;
		/* Request parameters are only meaningful for UTIL links. */
		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
			return -ENOPROTOOPT;
		lock_sock(&(cf_sk->sk));
		if (ol > sizeof(cf_sk->conn_req.param.data) ||
			copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
			release_sock(&cf_sk->sk);
			return -EINVAL;
		}
		cf_sk->conn_req.param.size = ol;
		release_sock(&cf_sk->sk);
		return 0;

	default:
		return -ENOPROTOOPT;
	}

	return 0;
bad_sol:
	return -ENOPROTOOPT;

}

/*
 * caif_connect() - Connect a CAIF Socket
 * Copied and modified af_irda.c:irda_connect().
 *
 * Note : by consulting "errno", the user space caller may learn the cause
 * of the failure. Most of them are visible in the function, others may come
 * from subroutines called and are listed here :
 *  o -EAFNOSUPPORT: bad socket family or type.
 *  o -ESOCKTNOSUPPORT: bad socket type or protocol
 *  o -EINVAL: bad socket address, or CAIF link type
 *  o -ECONNREFUSED: remote end refused the connection.
 *  o -EINPROGRESS: connect request sent but timed out (or non-blocking)
 *  o -EISCONN: already connected.
 *  o -ETIMEDOUT: Connection timed out (send timeout)
 *  o -ENODEV: No link layer to send request
 *  o -ECONNRESET: Received Shutdown indication or lost link layer
 *  o -ENOMEM: Out of memory
 *
 * State Strategy:
 *  o sk_state: holds the CAIF_* protocol state, it's updated by
 *	caif_ctrl_cb.
 *  o sock->state: holds the SS_* socket state and is updated by connect and
 *	disconnect.
 */
static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	long timeo;
	int err;
	int ifindex, headroom, tailroom;
	unsigned int mtu;
	struct net_device *dev;

	lock_sock(sk);

	err = -EAFNOSUPPORT;
	if (uaddr->sa_family != AF_CAIF)
		goto out;

	/* Decide, based on current socket state, whether a (re)connect
	 * is allowed, already in progress, or redundant. */
	switch (sock->state) {
	case SS_UNCONNECTED:
		/* Normal case, a fresh connect */
		caif_assert(sk->sk_state == CAIF_DISCONNECTED);
		break;
	case SS_CONNECTING:
		switch (sk->sk_state) {
		case CAIF_CONNECTED:
			sock->state = SS_CONNECTED;
			err = -EISCONN;
			goto out;
		case CAIF_DISCONNECTED:
			/* Reconnect allowed */
			break;
		case CAIF_CONNECTING:
			err = -EALREADY;
			if (flags & O_NONBLOCK)
				goto out;
			goto wait_connect;
		}
		break;
	case SS_CONNECTED:
		caif_assert(sk->sk_state == CAIF_CONNECTED ||
				sk->sk_state == CAIF_DISCONNECTED);
		if (sk->sk_shutdown & SHUTDOWN_MASK) {
			/* Allow re-connect after SHUTDOWN_IND */
			caif_disconnect_client(sock_net(sk), &cf_sk->layer);
			caif_free_client(&cf_sk->layer);
			break;
		}
		/* No reconnect on a seqpacket socket */
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
	case SS_FREE:
		caif_assert(1); /*Should never happen */
		break;
	}
	sk->sk_state = CAIF_DISCONNECTED;
	sock->state = SS_UNCONNECTED;
	sk_stream_kill_queues(&cf_sk->sk);

	err = -EINVAL;
	if (addr_len != sizeof(struct sockaddr_caif))
		goto out;

	memcpy(&cf_sk->conn_req.sockaddr, uaddr,
		sizeof(struct sockaddr_caif));

	/* Move to connecting socket, start sending Connect Requests */
	sock->state = SS_CONNECTING;
	sk->sk_state = CAIF_CONNECTING;

	/* Check priority value comming from socket */
	/* if priority value is out of range it will be ajusted */
	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
		cf_sk->conn_req.priority = CAIF_PRIO_MAX;
	else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
		cf_sk->conn_req.priority = CAIF_PRIO_MIN;
	else
		cf_sk->conn_req.priority = cf_sk->sk.sk_priority;

	/*ifindex = id of the interface.*/
	cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;

	cf_sk->layer.receive = caif_sktrecv_cb;

	err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
				&cf_sk->layer, &ifindex, &headroom, &tailroom);

	if (err < 0) {
		cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		goto out;
	}

	/* Derive headroom/MTU from the bound device under RCU. */
	err = -ENODEV;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
	if (!dev) {
		rcu_read_unlock();
		goto out;
	}
	cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
	mtu = dev->mtu;
	rcu_read_unlock();

	cf_sk->tailroom = tailroom;
	cf_sk->maxframe = mtu - (headroom + tailroom);
	if (cf_sk->maxframe < 1) {
		/* NOTE(review): dev is dereferenced here after
		 * rcu_read_unlock(); the cached 'mtu' would be safer —
		 * confirm against upstream. */
		pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
		err = -ENODEV;
		goto out;
	}

	err = -EINPROGRESS;
wait_connect:

	if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
		goto out;

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	release_sock(sk);
	err = -ERESTARTSYS;
	timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
			sk->sk_state != CAIF_CONNECTING,
			timeo);
	lock_sock(sk);
	if (timeo < 0)
		goto out; /* -ERESTARTSYS */

	err = -ETIMEDOUT;
	if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
		goto out;
	if (sk->sk_state != CAIF_CONNECTED) {
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);
		if (!err)
			err = -ECONNREFUSED;
		goto out;
	}
	sock->state = SS_CONNECTED;
	err = 0;
out:
	release_sock(sk);
	return err;
}

/*
 * caif_release() - Disconnect a CAIF Socket
 * Copied and modified af_irda.c:irda_release().
 */
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (!sk)
		return 0;

	set_tx_flow_off(cf_sk);

	/*
	 * Ensure that packets are not queued after this point in time.
	 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
	 * this ensures no packets when sock is dead.
	 */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	sock->sk = NULL;

	WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
	if (cf_sk->debugfs_socket_dir != NULL)
		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);

	lock_sock(&(cf_sk->sk));
	sk->sk_state = CAIF_DISCONNECTED;
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* Tear down the CAIF connection and wake up any pollers. */
	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);

	sock_orphan(sk);
	sk_stream_kill_queues(&cf_sk->sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}

/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
static unsigned int caif_poll(struct file *file,
				struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= POLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	/* Writable only while CAIF TX flow control is on. */
	if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

/* proto_ops for SOCK_SEQPACKET CAIF sockets. */
static const struct proto_ops caif_seqpacket_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_seqpkt_sendmsg,
	.recvmsg = caif_seqpkt_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

/* proto_ops for SOCK_STREAM CAIF sockets; differs from seqpacket only
 * in the sendmsg/recvmsg handlers. */
static const struct proto_ops caif_stream_ops = {
	.family = PF_CAIF,
	.owner = THIS_MODULE,
	.release = caif_release,
	.bind = sock_no_bind,
	.connect = caif_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = caif_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = caif_stream_sendmsg,
	.recvmsg = caif_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

/* This function is called when a socket is finally destroyed.
*/ static void caif_sock_destructor(struct sock *sk) { struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); caif_assert(!atomic_read(&sk->sk_wmem_alloc)); caif_assert(sk_unhashed(sk)); caif_assert(!sk->sk_socket); if (!sock_flag(sk, SOCK_DEAD)) { pr_debug("Attempt to release alive CAIF socket: %p\n", sk); return; } sk_stream_kill_queues(&cf_sk->sk); caif_free_client(&cf_sk->layer); } static int caif_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk = NULL; struct caifsock *cf_sk = NULL; static struct proto prot = {.name = "PF_CAIF", .owner = THIS_MODULE, .obj_size = sizeof(struct caifsock), }; if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN)) return -EPERM; /* * The sock->type specifies the socket type to use. * The CAIF socket is a packet stream in the sense * that it is packet based. CAIF trusts the reliability * of the link, no resending is implemented. */ if (sock->type == SOCK_SEQPACKET) sock->ops = &caif_seqpacket_ops; else if (sock->type == SOCK_STREAM) sock->ops = &caif_stream_ops; else return -ESOCKTNOSUPPORT; if (protocol < 0 || protocol >= CAIFPROTO_MAX) return -EPROTONOSUPPORT; /* * Set the socket state to unconnected. The socket state * is really not used at all in the net/core or socket.c but the * initialization makes sure that sock->state is not uninitialized. */ sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); if (!sk) return -ENOMEM; cf_sk = container_of(sk, struct caifsock, sk); /* Store the protocol */ sk->sk_protocol = (unsigned char) protocol; /* Initialize default priority for well-known cases */ switch (protocol) { case CAIFPROTO_AT: sk->sk_priority = TC_PRIO_CONTROL; break; case CAIFPROTO_RFM: sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; break; default: sk->sk_priority = TC_PRIO_BESTEFFORT; } /* * Lock in order to try to stop someone from opening the socket * too early. */ lock_sock(&(cf_sk->sk)); /* Initialize the nozero default sock structure data. 
*/ sock_init_data(sock, sk); sk->sk_destruct = caif_sock_destructor; mutex_init(&cf_sk->readlock); /* single task reading lock */ cf_sk->layer.ctrlcmd = caif_ctrl_cb; cf_sk->sk.sk_socket->state = SS_UNCONNECTED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; set_tx_flow_off(cf_sk); set_rx_flow_on(cf_sk); /* Set default options on configuration */ cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; release_sock(&cf_sk->sk); return 0; } static struct net_proto_family caif_family_ops = { .family = PF_CAIF, .create = caif_create, .owner = THIS_MODULE, }; static int __init caif_sktinit_module(void) { int err = sock_register(&caif_family_ops); if (!err) return err; return 0; } static void __exit caif_sktexit_module(void) { sock_unregister(PF_CAIF); } module_init(caif_sktinit_module); module_exit(caif_sktexit_module);
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5845_12
crossvul-cpp_data_good_5845_20
/*
 * af_llc.c - LLC User Interface SAPs
 * Description:
 *   Functions in this module are implementation of socket based llc
 *   communications for the Linux operating system. Support of llc class
 *   one and class two is provided via SOCK_DGRAM and SOCK_STREAM
 *   respectively.
 *
 *   An llc2 connection is (mac + sap), only one llc2 sap connection
 *   is allowed per mac. Though one sap may have multiple mac + sap
 *   connections.
 *
 * Copyright (c) 2001 by Jay Schulist <jschlst@samba.org>
 *		 2002-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program can be redistributed or modified under the terms of the
 * GNU General Public License as published by the Free Software Foundation.
 * This program is distributed without any warranty or implied warranty
 * of merchantability or fitness for a particular purpose.
 *
 * See the GNU General Public License for more details.
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/llc.h>
#include <net/llc_sap.h>
#include <net/llc_pdu.h>
#include <net/llc_conn.h>
#include <net/tcp_states.h>

/* remember: uninitialized global data is zeroed because its in .bss */
static u16 llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
/* Per-SAP monotonically increasing link number counter. */
static u16 llc_ui_sap_link_no_max[256];
/* All-zero address used by llc_ui_addr_null() as comparison reference. */
static struct sockaddr_llc llc_ui_addrnull;
static const struct proto_ops llc_ui_ops;

static int llc_ui_wait_for_conn(struct sock *sk, long timeout);
static int llc_ui_wait_for_disc(struct sock *sk, long timeout);
static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);

#if 0
#define dprintk(args...) printk(KERN_DEBUG args)
#else
#define dprintk(args...)
#endif

/* Maybe we'll add some more in the future. */
#define LLC_CMSG_PKTINFO	1


/**
 *	llc_ui_next_link_no - return the next unused link number for a sap
 *	@sap: Address of sap to get link number from.
 *
 *	Return the next unused link number for a given sap.
 */
static inline u16 llc_ui_next_link_no(int sap)
{
	/* Post-increment: hands out the current counter and bumps it. */
	return llc_ui_sap_link_no_max[sap]++;
}

/**
 *	llc_proto_type - return eth protocol for ARP header type
 *	@arphrd: ARP header type.
 *
 *	Given an ARP header type return the corresponding ethernet protocol.
 */
static inline __be16 llc_proto_type(u16 arphrd)
{
	/* Always 802.2; the arphrd argument is currently ignored. */
	return htons(ETH_P_802_2);
}

/**
 *	llc_ui_addr_null - determines if a address structure is null
 *	@addr: Address to test if null.
 */
static inline u8 llc_ui_addr_null(struct sockaddr_llc *addr)
{
	return !memcmp(addr, &llc_ui_addrnull, sizeof(*addr));
}

/**
 *	llc_ui_header_len - return length of llc header based on operation
 *	@sk: Socket which contains a valid llc socket type.
 *	@addr: Complete sockaddr_llc structure received from the user.
 *
 *	Provide the length of the llc header depending on what kind of
 *	operation the user would like to perform and the type of socket.
 *	Returns the correct llc header length.
 */
static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
{
	u8 rc = LLC_PDU_LEN_U;

	/* TEST/XID frames use the U-format header; SOCK_STREAM (llc2)
	 * data uses the longer I-format header. */
	if (addr->sllc_test || addr->sllc_xid)
		rc = LLC_PDU_LEN_U;
	else if (sk->sk_type == SOCK_STREAM)
		rc = LLC_PDU_LEN_I;
	return rc;
}

/**
 *	llc_ui_send_data - send data via reliable llc2 connection
 *	@sk: Connection the socket is using.
 *	@skb: Data the user wishes to send.
 *	@noblock: can we block waiting for data?
 *
 *	Send data via reliable llc2 connection.
 *	Returns 0 upon success, non-zero if action did not succeed.
 */
static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
{
	struct llc_sock* llc = llc_sk(sk);
	int rc = 0;

	/* If the connection cannot currently accept data (state, remote
	 * busy, or P flag set), wait for the busy condition to clear
	 * before transmitting. */
	if (unlikely(llc_data_accept_state(llc->state) ||
		     llc->remote_busy_flag ||
		     llc->p_flag)) {
		long timeout = sock_sndtimeo(sk, noblock);

		rc = llc_ui_wait_for_busy_core(sk, timeout);
	}
	if (unlikely(!rc))
		rc = llc_build_and_send_pkt(sk, skb);
	return rc;
}

/* Attach a freshly allocated sock to its socket and install our ops. */
static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
{
	sock_graft(sk, sock);
	sk->sk_type	= sock->type;
	sock->ops	= &llc_ui_ops;
}

static struct proto llc_proto = {
	.name	  = "LLC",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct llc_sock),
	.slab_flags = SLAB_DESTROY_BY_RCU,
};

/**
 *	llc_ui_create - alloc and init a new llc_ui socket
 *	@net: network namespace (must be default network)
 *	@sock: Socket to initialize and attach allocated sk to.
 *	@protocol: Unused.
 *	@kern: on behalf of kernel or userspace
 *
 *	Allocate and initialize a new llc_ui socket, validate the user wants a
 *	socket type we have available.
 *	Returns 0 upon success, negative upon failure.
 */
static int llc_ui_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	int rc = -ESOCKTNOSUPPORT;

	/* Raw-capability gate: creating LLC sockets requires CAP_NET_RAW
	 * in the namespace's user-ns. */
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) {
		rc = -ENOMEM;
		sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto);
		if (sk) {
			rc = 0;
			llc_ui_sk_init(sock, sk);
		}
	}
	return rc;
}

/**
 *	llc_ui_release - shutdown socket
 *	@sock: Socket to release.
 *
 *	Shutdown and deallocate an existing socket.
 */
static int llc_ui_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct llc_sock *llc;

	if (unlikely(sk == NULL))
		goto out;
	sock_hold(sk);
	lock_sock(sk);
	llc = llc_sk(sk);
	dprintk("%s: closing local(%02X) remote(%02X)\n", __func__,
		llc->laddr.lsap, llc->daddr.lsap);
	if (!llc_send_disc(sk))
		llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
	/* Only sockets that were actually bound are in the SAP list. */
	if (!sock_flag(sk, SOCK_ZAPPED))
		llc_sap_remove_socket(llc->sap, sk);
	release_sock(sk);
	/* Drop the device reference taken at bind/autobind time. */
	if (llc->dev)
		dev_put(llc->dev);
	sock_put(sk);
	llc_sk_free(sk);
out:
	return 0;
}

/**
 *	llc_ui_autoport - provide dynamically allocate SAP number
 *
 *	Provide the caller with a dynamically allocated SAP number according
 *	to the rules that are set in this function. Returns: 0, upon failure,
 *	SAP number otherwise.
 */
static int llc_ui_autoport(void)
{
	struct llc_sap *sap;
	int i, tries = 0;

	/* Scan the dynamic SAP range (stepping by 2) from the last
	 * hand-out point, wrapping once per try. */
	while (tries < LLC_SAP_DYN_TRIES) {
		for (i = llc_ui_sap_last_autoport;
		     i < LLC_SAP_DYN_STOP; i += 2) {
			sap = llc_sap_find(i);
			if (!sap) {
				llc_ui_sap_last_autoport = i + 2;
				goto out;
			}
			llc_sap_put(sap);
		}
		llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
		tries++;
	}
	i = 0;
out:
	return i;
}

/**
 *	llc_ui_autobind - automatically bind a socket to a sap
 *	@sock: socket to bind
 *	@addr: address to connect to
 *
 *	Used by llc_ui_connect and llc_ui_sendmsg when the user hasn't
 *	specifically used llc_ui_bind to bind to an specific address/sap
 *
 *	Returns: 0 upon success, negative otherwise.
 */
static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
{
	struct sock *sk = sock->sk;
	struct llc_sock *llc = llc_sk(sk);
	struct llc_sap *sap;
	int rc = -EINVAL;

	if (!sock_flag(sk, SOCK_ZAPPED))
		goto out;
	rc = -ENODEV;
	if (sk->sk_bound_dev_if) {
		llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
		/* Bound device must match the requested ARP hardware type;
		 * otherwise drop the reference and fail. */
		if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
			dev_put(llc->dev);
			llc->dev = NULL;
		}
	} else
		llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
	if (!llc->dev)
		goto out;
	rc = -EUSERS;
	llc->laddr.lsap = llc_ui_autoport();
	if (!llc->laddr.lsap)
		goto out;
	rc = -EBUSY; /* some other network layer is using the sap */
	sap = llc_sap_open(llc->laddr.lsap, NULL);
	if (!sap)
		goto out;
	memcpy(llc->laddr.mac, llc->dev->dev_addr, IFHWADDRLEN);
	memcpy(&llc->addr, addr, sizeof(llc->addr));
	/* assign new connection to its SAP */
	llc_sap_add_socket(sap, sk);
	sock_reset_flag(sk, SOCK_ZAPPED);
	rc = 0;
out:
	return rc;
}

/**
 *	llc_ui_bind - bind a socket to a specific address.
 *	@sock: Socket to bind an address to.
 *	@uaddr: Address the user wants the socket bound to.
 *	@addrlen: Length of the uaddr structure.
 *
 *	Bind a socket to a specific address. For llc a user is able to bind to
 *	a specific sap only or mac + sap.
 *	If the user desires to bind to a specific mac + sap, it is possible to
 *	have multiple sap connections via multiple macs.
 *	Bind and autobind for that matter must enforce the correct sap usage
 *	otherwise all hell will break loose.
 *	Returns: 0 upon success, negative otherwise.
 */
static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
{
	struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr;
	struct sock *sk = sock->sk;
	struct llc_sock *llc = llc_sk(sk);
	struct llc_sap *sap;
	int rc = -EINVAL;

	dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
	if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
		goto out;
	rc = -EAFNOSUPPORT;
	if (unlikely(addr->sllc_family != AF_LLC))
		goto out;
	rc = -ENODEV;
	/* Device lookup under RCU; a proper reference is taken with
	 * dev_hold() before leaving the read-side section. */
	rcu_read_lock();
	if (sk->sk_bound_dev_if) {
		llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
		if (llc->dev) {
			if (!addr->sllc_arphrd)
				addr->sllc_arphrd = llc->dev->type;
			if (is_zero_ether_addr(addr->sllc_mac))
				memcpy(addr->sllc_mac, llc->dev->dev_addr,
				       IFHWADDRLEN);
			/* Requested type/MAC must agree with the bound
			 * device, else reject the bind. */
			if (addr->sllc_arphrd != llc->dev->type ||
			    !ether_addr_equal(addr->sllc_mac,
					      llc->dev->dev_addr)) {
				rc = -EINVAL;
				llc->dev = NULL;
			}
		}
	} else
		llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
					   addr->sllc_mac);
	if (llc->dev)
		dev_hold(llc->dev);
	rcu_read_unlock();
	if (!llc->dev)
		goto out;
	if (!addr->sllc_sap) {
		rc = -EUSERS;
		addr->sllc_sap = llc_ui_autoport();
		if (!addr->sllc_sap)
			goto out;
	}
	sap = llc_sap_find(addr->sllc_sap);
	if (!sap) {
		sap = llc_sap_open(addr->sllc_sap, NULL);
		rc = -EBUSY; /* some other network layer is using the sap */
		if (!sap)
			goto out;
	} else {
		struct llc_addr laddr, daddr;
		struct sock *ask;

		memset(&laddr, 0, sizeof(laddr));
		memset(&daddr, 0, sizeof(daddr));
		/*
		 * FIXME: check if the address is multicast,
		 *	  only SOCK_DGRAM can do this.
		 */
		memcpy(laddr.mac, addr->sllc_mac, IFHWADDRLEN);
		laddr.lsap = addr->sllc_sap;
		rc = -EADDRINUSE; /* mac + sap clash.
		 */
		ask = llc_lookup_established(sap, &daddr, &laddr);
		if (ask) {
			sock_put(ask);
			goto out_put;
		}
	}
	llc->laddr.lsap = addr->sllc_sap;
	memcpy(llc->laddr.mac, addr->sllc_mac, IFHWADDRLEN);
	memcpy(&llc->addr, addr, sizeof(llc->addr));
	/* assign new connection to its SAP */
	llc_sap_add_socket(sap, sk);
	sock_reset_flag(sk, SOCK_ZAPPED);
	rc = 0;
out_put:
	llc_sap_put(sap);
out:
	return rc;
}

/**
 *	llc_ui_shutdown - shutdown a connect llc2 socket.
 *	@sock: Socket to shutdown.
 *	@how: What part of the socket to shutdown.
 *
 *	Shutdown a connected llc2 socket. Currently this function only supports
 *	shutting down both sends and receives (2), we could probably make this
 *	function such that a user can shutdown only half the connection but not
 *	right now.
 *	Returns: 0 upon success, negative otherwise.
 */
static int llc_ui_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int rc = -ENOTCONN;

	lock_sock(sk);
	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
		goto out;
	rc = -EINVAL;
	/* Only full-duplex shutdown (how == 2) is implemented. */
	if (how != 2)
		goto out;
	rc = llc_send_disc(sk);
	if (!rc)
		rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);
out:
	release_sock(sk);
	return rc;
}

/**
 *	llc_ui_connect - Connect to a remote llc2 mac + sap.
 *	@sock: Socket which will be connected to the remote destination.
 *	@uaddr: Remote and possibly the local address of the new connection.
 *	@addrlen: Size of uaddr structure.
 *	@flags: Operational flags specified by the user.
 *
 *	Connect to a remote llc2 mac + sap. The caller must specify the
 *	destination mac and address to connect to. If the user hasn't previously
 *	called bind(2) with a smac the address of the first interface of the
 *	specified arp type will be used.
 *	This function will autobind if user did not previously call bind.
 *	Returns: 0 upon success, negative otherwise.
 */
static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr,
			  int addrlen, int flags)
{
	struct sock *sk = sock->sk;
	struct llc_sock *llc = llc_sk(sk);
	struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr;
	int rc = -EINVAL;

	lock_sock(sk);
	if (unlikely(addrlen != sizeof(*addr)))
		goto out;
	rc = -EAFNOSUPPORT;
	if (unlikely(addr->sllc_family != AF_LLC))
		goto out;
	if (unlikely(sk->sk_type != SOCK_STREAM))
		goto out;
	rc = -EALREADY;
	if (unlikely(sock->state == SS_CONNECTING))
		goto out;
	/* bind connection to sap if user hasn't done it. */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		/* bind to sap with null dev, exclusive. */
		rc = llc_ui_autobind(sock, addr);
		if (rc)
			goto out;
	}
	llc->daddr.lsap = addr->sllc_sap;
	memcpy(llc->daddr.mac, addr->sllc_mac, IFHWADDRLEN);
	sock->state = SS_CONNECTING;
	sk->sk_state   = TCP_SYN_SENT;
	llc->link   = llc_ui_next_link_no(llc->sap->laddr.lsap);
	rc = llc_establish_connection(sk, llc->dev->dev_addr,
				      addr->sllc_mac, addr->sllc_sap);
	if (rc) {
		dprintk("%s: llc_ui_send_conn failed :-(\n", __func__);
		sock->state  = SS_UNCONNECTED;
		sk->sk_state = TCP_CLOSE;
		goto out;
	}

	/* Wait (unless non-blocking) for the handshake to complete. */
	if (sk->sk_state == TCP_SYN_SENT) {
		const long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

		if (!timeo || !llc_ui_wait_for_conn(sk, timeo))
			goto out;

		rc = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
	}

	if (sk->sk_state == TCP_CLOSE)
		goto sock_error;

	sock->state = SS_CONNECTED;
	rc = 0;
out:
	release_sock(sk);
	return rc;
sock_error:
	rc = sock_error(sk) ? : -ECONNABORTED;
	sock->state = SS_UNCONNECTED;
	goto out;
}

/**
 *	llc_ui_listen - allow a normal socket to accept incoming connections
 *	@sock: Socket to allow incoming connections on.
 *	@backlog: Number of connections to queue.
 *
 *	Allow a normal socket to accept incoming connections.
 *	Returns 0 upon success, negative otherwise.
 */
static int llc_ui_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int rc = -EINVAL;

	lock_sock(sk);
	if (unlikely(sock->state != SS_UNCONNECTED))
		goto out;
	rc = -EOPNOTSUPP;
	if (unlikely(sk->sk_type != SOCK_STREAM))
		goto out;
	rc = -EAGAIN;
	/* Must be bound to a SAP before listening. */
	if (sock_flag(sk, SOCK_ZAPPED))
		goto out;
	rc = 0;
	if (!(unsigned int)backlog)	/* BSDism */
		backlog = 1;
	sk->sk_max_ack_backlog = backlog;
	if (sk->sk_state != TCP_LISTEN) {
		sk->sk_ack_backlog = 0;
		sk->sk_state	   = TCP_LISTEN;
	}
	sk->sk_socket->flags |= __SO_ACCEPTCON;
out:
	release_sock(sk);
	return rc;
}

/* Sleep until the socket reaches TCP_CLOSE, a signal arrives, or the
 * timeout expires. Returns 0, -ERESTARTSYS or -EAGAIN accordingly. */
static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
{
	DEFINE_WAIT(wait);
	int rc = 0;

	while (1) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE))
			break;
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = -EAGAIN;
		if (!timeout)
			break;
		rc = 0;
	}
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}

/* Sleep while the connection handshake is pending (TCP_SYN_SENT).
 * Returns the remaining timeout (0 means it expired). */
static int llc_ui_wait_for_conn(struct sock *sk, long timeout)
{
	DEFINE_WAIT(wait);

	while (1) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT))
			break;
		if (signal_pending(current) || !timeout)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeout;
}

/* Sleep until the llc2 core can accept data again (state allows it,
 * remote not busy, P flag clear) or RCV_SHUTDOWN is set. */
static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
{
	DEFINE_WAIT(wait);
	struct llc_sock *llc = llc_sk(sk);
	int rc;

	while (1) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		rc = 0;
		if (sk_wait_event(sk, &timeout,
				  (sk->sk_shutdown & RCV_SHUTDOWN) ||
				  (!llc_data_accept_state(llc->state) &&
				   !llc->remote_busy_flag &&
				   !llc->p_flag)))
			break;
		rc = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		rc = -EAGAIN;
		if (!timeout)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}

static int llc_wait_data(struct sock *sk, long timeo)
{
	int rc;

	while (1) {
		/*
		 * POSIX 1003.1g mandates this order.
*/ rc = sock_error(sk); if (rc) break; rc = 0; if (sk->sk_shutdown & RCV_SHUTDOWN) break; rc = -EAGAIN; if (!timeo) break; rc = sock_intr_errno(timeo); if (signal_pending(current)) break; rc = 0; if (sk_wait_data(sk, &timeo)) break; } return rc; } static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(skb->sk); if (llc->cmsg_flags & LLC_CMSG_PKTINFO) { struct llc_pktinfo info; info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex; llc_pdu_decode_dsap(skb, &info.lpi_sap); llc_pdu_decode_da(skb, info.lpi_mac); put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info); } } /** * llc_ui_accept - accept a new incoming connection. * @sock: Socket which connections arrive on. * @newsock: Socket to move incoming connection to. * @flags: User specified operational flags. * * Accept a new incoming connection. * Returns 0 upon success, negative otherwise. */ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk, *newsk; struct llc_sock *llc, *newllc; struct sk_buff *skb; int rc = -EOPNOTSUPP; dprintk("%s: accepting on %02X\n", __func__, llc_sk(sk)->laddr.lsap); lock_sock(sk); if (unlikely(sk->sk_type != SOCK_STREAM)) goto out; rc = -EINVAL; if (unlikely(sock->state != SS_UNCONNECTED || sk->sk_state != TCP_LISTEN)) goto out; /* wait for a connection to arrive. */ if (skb_queue_empty(&sk->sk_receive_queue)) { rc = llc_wait_data(sk, sk->sk_rcvtimeo); if (rc) goto out; } dprintk("%s: got a new connection on %02X\n", __func__, llc_sk(sk)->laddr.lsap); skb = skb_dequeue(&sk->sk_receive_queue); rc = -EINVAL; if (!skb->sk) goto frees; rc = 0; newsk = skb->sk; /* attach connection to a new socket. 
*/ llc_ui_sk_init(newsock, newsk); sock_reset_flag(newsk, SOCK_ZAPPED); newsk->sk_state = TCP_ESTABLISHED; newsock->state = SS_CONNECTED; llc = llc_sk(sk); newllc = llc_sk(newsk); memcpy(&newllc->addr, &llc->addr, sizeof(newllc->addr)); newllc->link = llc_ui_next_link_no(newllc->laddr.lsap); /* put original socket back into a clean listen state. */ sk->sk_state = TCP_LISTEN; sk->sk_ack_backlog--; dprintk("%s: ok success on %02X, client on %02X\n", __func__, llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap); frees: kfree_skb(skb); out: release_sock(sk); return rc; } /** * llc_ui_recvmsg - copy received data to the socket user. * @sock: Socket to copy data from. * @msg: Various user space related information. * @len: Size of user buffer. * @flags: User specified flags. * * Copy received data to the socket user. * Returns non-negative upon success, negative otherwise. */ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name; const int nonblock = flags & MSG_DONTWAIT; struct sk_buff *skb = NULL; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); unsigned long cpu_flags; size_t copied = 0; u32 peek_seq = 0; u32 *seq; unsigned long used; int target; /* Read at least this many bytes */ long timeo; lock_sock(sk); copied = -ENOTCONN; if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) goto out; timeo = sock_rcvtimeo(sk, nonblock); seq = &llc->copied_seq; if (flags & MSG_PEEK) { peek_seq = llc->copied_seq; seq = &peek_seq; } target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); copied = 0; do { u32 offset; /* * We need to check signals first, to get correct SIGURG * handling. FIXME: Need to check this doesn't impact 1003.1g * and move it down to the bottom of the loop */ if (signal_pending(current)) { if (copied) break; copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; break; } /* Next get a buffer. 
*/ skb = skb_peek(&sk->sk_receive_queue); if (skb) { offset = *seq; goto found_ok_skb; } /* Well, if we have backlog, try to process it now yet. */ if (copied >= target && !sk->sk_backlog.tail) break; if (copied) { if (sk->sk_err || sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo || (flags & MSG_PEEK)) break; } else { if (sock_flag(sk, SOCK_DONE)) break; if (sk->sk_err) { copied = sock_error(sk); break; } if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE) { if (!sock_flag(sk, SOCK_DONE)) { /* * This occurs when user tries to read * from never connected socket. */ copied = -ENOTCONN; break; } break; } if (!timeo) { copied = -EAGAIN; break; } } if (copied >= target) { /* Do not sleep, just process backlog. */ release_sock(sk); lock_sock(sk); } else sk_wait_data(sk, &timeo); if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", current->comm, task_pid_nr(current)); peek_seq = llc->copied_seq; } continue; found_ok_skb: /* Ok so how much can we use? */ used = skb->len - offset; if (len < used) used = len; if (!(flags & MSG_TRUNC)) { int rc = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, used); if (rc) { /* Exception. Bailout! 
*/ if (!copied) copied = -EFAULT; break; } } *seq += used; copied += used; len -= used; /* For non stream protcols we get one packet per recvmsg call */ if (sk->sk_type != SOCK_STREAM) goto copy_uaddr; if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } /* Partial read */ if (used + offset < skb->len) continue; } while (len > 0); out: release_sock(sk); return copied; copy_uaddr: if (uaddr != NULL && skb != NULL) { memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); msg->msg_namelen = sizeof(*uaddr); } if (llc_sk(sk)->cmsg_flags) llc_cmsg_rcv(msg, skb); if (!(flags & MSG_PEEK)) { spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); sk_eat_skb(sk, skb, false); spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); *seq = 0; } goto out; } /** * llc_ui_sendmsg - Transmit data provided by the socket user. * @sock: Socket to transmit data from. * @msg: Various user related information. * @len: Length of data to transmit. * * Transmit data provided by the socket user. * Returns non-negative upon success, negative otherwise. */ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); struct sockaddr_llc *addr = (struct sockaddr_llc *)msg->msg_name; int flags = msg->msg_flags; int noblock = flags & MSG_DONTWAIT; struct sk_buff *skb; size_t size = 0; int rc = -EINVAL, copied = 0, hdrlen; dprintk("%s: sending from %02X to %02X\n", __func__, llc->laddr.lsap, llc->daddr.lsap); lock_sock(sk); if (addr) { if (msg->msg_namelen < sizeof(*addr)) goto release; } else { if (llc_ui_addr_null(&llc->addr)) goto release; addr = &llc->addr; } /* must bind connection to sap if user hasn't done it. */ if (sock_flag(sk, SOCK_ZAPPED)) { /* bind to sap with null dev, exclusive. 
*/ rc = llc_ui_autobind(sock, addr); if (rc) goto release; } hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr); size = hdrlen + len; if (size > llc->dev->mtu) size = llc->dev->mtu; copied = size - hdrlen; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); if (!skb) goto release; skb->dev = llc->dev; skb->protocol = llc_proto_type(addr->sllc_arphrd); skb_reserve(skb, hdrlen); rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied); if (rc) goto out; if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) { llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac, addr->sllc_sap); goto out; } if (addr->sllc_test) { llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac, addr->sllc_sap); goto out; } if (addr->sllc_xid) { llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac, addr->sllc_sap); goto out; } rc = -ENOPROTOOPT; if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua)) goto out; rc = llc_ui_send_data(sk, skb, noblock); out: if (rc) { kfree_skb(skb); release: dprintk("%s: failed sending from %02X to %02X: %d\n", __func__, llc->laddr.lsap, llc->daddr.lsap, rc); } release_sock(sk); return rc ? : copied; } /** * llc_ui_getname - return the address info of a socket * @sock: Socket to get address of. * @uaddr: Address structure to return information. * @uaddrlen: Length of address structure. * @peer: Does user want local or remote address information. * * Return the address information of a socket. 
*/ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddrlen, int peer) { struct sockaddr_llc sllc; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); int rc = -EBADF; memset(&sllc, 0, sizeof(sllc)); lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) goto out; *uaddrlen = sizeof(sllc); if (peer) { rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; if(llc->dev) sllc.sllc_arphrd = llc->dev->type; sllc.sllc_sap = llc->daddr.lsap; memcpy(&sllc.sllc_mac, &llc->daddr.mac, IFHWADDRLEN); } else { rc = -EINVAL; if (!llc->sap) goto out; sllc.sllc_sap = llc->sap->laddr.lsap; if (llc->dev) { sllc.sllc_arphrd = llc->dev->type; memcpy(&sllc.sllc_mac, llc->dev->dev_addr, IFHWADDRLEN); } } rc = 0; sllc.sllc_family = AF_LLC; memcpy(uaddr, &sllc, sizeof(sllc)); out: release_sock(sk); return rc; } /** * llc_ui_ioctl - io controls for PF_LLC * @sock: Socket to get/set info * @cmd: command * @arg: optional argument for cmd * * get/set info on llc sockets */ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return -ENOIOCTLCMD; } /** * llc_ui_setsockopt - set various connection specific parameters. * @sock: Socket to set options on. * @level: Socket level user is requesting operations on. * @optname: Operation name. * @optval: User provided operation data. * @optlen: Length of optval. * * Set various connection specific parameters. 
*/ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); unsigned int opt; int rc = -EINVAL; lock_sock(sk); if (unlikely(level != SOL_LLC || optlen != sizeof(int))) goto out; rc = get_user(opt, (int __user *)optval); if (rc) goto out; rc = -EINVAL; switch (optname) { case LLC_OPT_RETRY: if (opt > LLC_OPT_MAX_RETRY) goto out; llc->n2 = opt; break; case LLC_OPT_SIZE: if (opt > LLC_OPT_MAX_SIZE) goto out; llc->n1 = opt; break; case LLC_OPT_ACK_TMR_EXP: if (opt > LLC_OPT_MAX_ACK_TMR_EXP) goto out; llc->ack_timer.expire = opt * HZ; break; case LLC_OPT_P_TMR_EXP: if (opt > LLC_OPT_MAX_P_TMR_EXP) goto out; llc->pf_cycle_timer.expire = opt * HZ; break; case LLC_OPT_REJ_TMR_EXP: if (opt > LLC_OPT_MAX_REJ_TMR_EXP) goto out; llc->rej_sent_timer.expire = opt * HZ; break; case LLC_OPT_BUSY_TMR_EXP: if (opt > LLC_OPT_MAX_BUSY_TMR_EXP) goto out; llc->busy_state_timer.expire = opt * HZ; break; case LLC_OPT_TX_WIN: if (opt > LLC_OPT_MAX_WIN) goto out; llc->k = opt; break; case LLC_OPT_RX_WIN: if (opt > LLC_OPT_MAX_WIN) goto out; llc->rw = opt; break; case LLC_OPT_PKTINFO: if (opt) llc->cmsg_flags |= LLC_CMSG_PKTINFO; else llc->cmsg_flags &= ~LLC_CMSG_PKTINFO; break; default: rc = -ENOPROTOOPT; goto out; } rc = 0; out: release_sock(sk); return rc; } /** * llc_ui_getsockopt - get connection specific socket info * @sock: Socket to get information from. * @level: Socket level user is requesting operations on. * @optname: Operation name. * @optval: Variable to return operation data in. * @optlen: Length of optval. * * Get connection specific socket information. 
 */
static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct llc_sock *llc = llc_sk(sk);
	int val = 0, len = 0, rc = -EINVAL;

	lock_sock(sk);
	if (unlikely(level != SOL_LLC))
		goto out;
	rc = get_user(len, optlen);
	if (rc)
		goto out;
	rc = -EINVAL;
	/* exactly one int is read back for every option */
	if (len != sizeof(int))
		goto out;
	switch (optname) {
	case LLC_OPT_RETRY:
		val = llc->n2;					break;
	case LLC_OPT_SIZE:
		val = llc->n1;					break;
	/* timer options are stored in jiffies; convert back to seconds */
	case LLC_OPT_ACK_TMR_EXP:
		val = llc->ack_timer.expire / HZ;		break;
	case LLC_OPT_P_TMR_EXP:
		val = llc->pf_cycle_timer.expire / HZ;		break;
	case LLC_OPT_REJ_TMR_EXP:
		val = llc->rej_sent_timer.expire / HZ;		break;
	case LLC_OPT_BUSY_TMR_EXP:
		val = llc->busy_state_timer.expire / HZ;	break;
	case LLC_OPT_TX_WIN:
		val = llc->k;					break;
	case LLC_OPT_RX_WIN:
		val = llc->rw;					break;
	case LLC_OPT_PKTINFO:
		val = (llc->cmsg_flags & LLC_CMSG_PKTINFO) != 0;
		break;
	default:
		rc = -ENOPROTOOPT;
		goto out;
	}
	rc = 0;
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		rc = -EFAULT;
out:
	release_sock(sk);
	return rc;
}

/* PF_LLC socket-family registration hook (sock_register). */
static const struct net_proto_family llc_ui_family_ops = {
	.family = PF_LLC,
	.create = llc_ui_create,
	.owner	= THIS_MODULE,
};

/* proto_ops vector wiring the llc_ui_* handlers into the socket layer. */
static const struct proto_ops llc_ui_ops = {
	.family	     = PF_LLC,
	.owner       = THIS_MODULE,
	.release     = llc_ui_release,
	.bind	     = llc_ui_bind,
	.connect     = llc_ui_connect,
	.socketpair  = sock_no_socketpair,
	.accept      = llc_ui_accept,
	.getname     = llc_ui_getname,
	.poll	     = datagram_poll,
	.ioctl       = llc_ui_ioctl,
	.listen      = llc_ui_listen,
	.shutdown    = llc_ui_shutdown,
	.setsockopt  = llc_ui_setsockopt,
	.getsockopt  = llc_ui_getsockopt,
	.sendmsg     = llc_ui_sendmsg,
	.recvmsg     = llc_ui_recvmsg,
	.mmap	     = sock_no_mmap,
	.sendpage    = sock_no_sendpage,
};

/* Failure messages for the init-time registration steps below. */
static const char llc_proc_err_msg[] __initconst =
	KERN_CRIT "LLC: Unable to register the proc_fs entries\n";
static const char llc_sysctl_err_msg[] __initconst =
	KERN_CRIT "LLC: Unable to register the sysctl entries\n";
static const char llc_sock_err_msg[] __initconst =
	KERN_CRIT "LLC: Unable to register the network family\n";

/*
 * Module init: register the protocol, procfs, sysctl and socket family,
 * then install the SAP/connection packet handlers.  On failure the goto
 * chain unwinds in strict reverse order of the successful steps.
 */
static int __init llc2_init(void)
{
	int rc = proto_register(&llc_proto, 0);

	if (rc != 0)
		goto out;
	llc_build_offset_table();
	llc_station_init();
	llc_ui_sap_last_autoport = LLC_SAP_DYN_START;
	rc = llc_proc_init();
	if (rc != 0) {
		printk(llc_proc_err_msg);
		goto out_station;
	}
	rc = llc_sysctl_init();
	if (rc) {
		printk(llc_sysctl_err_msg);
		goto out_proc;
	}
	rc = sock_register(&llc_ui_family_ops);
	if (rc) {
		printk(llc_sock_err_msg);
		goto out_sysctl;
	}
	llc_add_pack(LLC_DEST_SAP, llc_sap_handler);
	llc_add_pack(LLC_DEST_CONN, llc_conn_handler);
out:
	return rc;
out_sysctl:
	llc_sysctl_exit();
out_proc:
	llc_proc_exit();
out_station:
	llc_station_exit();
	proto_unregister(&llc_proto);
	goto out;
}

/* Module exit: tear everything down in reverse order of llc2_init(). */
static void __exit llc2_exit(void)
{
	llc_station_exit();
	llc_remove_pack(LLC_DEST_SAP);
	llc_remove_pack(LLC_DEST_CONN);
	sock_unregister(PF_LLC);
	llc_proc_exit();
	llc_sysctl_exit();
	proto_unregister(&llc_proto);
}

module_init(llc2_init);
module_exit(llc2_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Procom 1997, Jay Schullist 2001, Arnaldo C. Melo 2001-2003");
MODULE_DESCRIPTION("IEEE 802.2 PF_LLC support");
MODULE_ALIAS_NETPROTO(PF_LLC);
./CrossVul/dataset_final_sorted/CWE-20/c/good_5845_20
crossvul-cpp_data_bad_1821_1
/* * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Copyright (C) 2001 IBM * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) * * Derived from "arch/i386/kernel/signal.c" * Copyright (C) 1991, 1992 Linus Torvalds * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/elf.h> #include <linux/ptrace.h> #include <linux/ratelimit.h> #ifdef CONFIG_PPC64 #include <linux/syscalls.h> #include <linux/compat.h> #else #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #endif #include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/sigcontext.h> #include <asm/vdso.h> #include <asm/switch_to.h> #include <asm/tm.h> #ifdef CONFIG_PPC64 #include "ppc32.h" #include <asm/unistd.h> #else #include <asm/ucontext.h> #include <asm/pgtable.h> #endif #include "signal.h" #ifdef CONFIG_PPC64 #define sys_rt_sigreturn compat_sys_rt_sigreturn #define sys_swapcontext compat_sys_swapcontext #define sys_sigreturn compat_sys_sigreturn #define old_sigaction old_sigaction32 #define sigcontext sigcontext32 #define mcontext mcontext32 #define ucontext ucontext32 #define __save_altstack __compat_save_altstack /* * Userspace code may pass a ucontext which doesn't include VSX added * at the end. We need to check for this case. 
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs. This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
/* Split each 64-bit sigset word into two 32-bit halves for compat
 * userspace.  The case labels deliberately fall through so higher
 * _NSIG_WORDS configurations also copy the lower words. */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	compat_sigset_t	cset;

	switch (_NSIG_WORDS) {
	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
		cset.sig[7] = set->sig[3] >> 32;
	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
		cset.sig[5] = set->sig[2] >> 32;
	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
		cset.sig[3] = set->sig[1] >> 32;
	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
		cset.sig[1] = set->sig[0] >> 32;
	}
	return copy_to_user(uset, &cset, sizeof(*uset));
}

/* Inverse of put_sigset_t: reassemble 64-bit words from the 32-bit
 * halves stored by compat userspace (intentional fallthrough again). */
static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	compat_sigset_t s32;

	if (copy_from_user(&s32, uset, sizeof(*uset)))
		return -EFAULT;

	/*
	 * Swap the 2 words of the 64-bit sigset_t (they are stored
	 * in the "wrong" endian in 32-bit user storage).
	 */
	switch (_NSIG_WORDS) {
	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
	}
	return 0;
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

/* Narrow each 64-bit GPR to 32 bits and store it in the compat
 * mcontext; volatile-only register sets skip r14..r31. */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));
	for (i = 0; i <= PT_RESULT; i ++) {
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

/* Restore GPRs from the compat mcontext, skipping MSR and SOFTE which
 * userspace must not be able to overwrite directly. */
static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

/* Native 32-bit kernel: sigsets are copied verbatim. */
static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

/* Restore in two pieces so the user-supplied MSR word is never copied
 * into the live pt_regs. */
static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 *
				sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represent all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 * When we have rt signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 * (the +16 is to get the siginfo and ucontext in the same
 * positions as in older kernels).
 *
 * Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

#ifdef CONFIG_VSX
/* With VSX, FPRs live interleaved in fp_state; gather them (plus fpscr
 * in the final slot) into a contiguous buffer before the user copy. */
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	buf[i] = task->thread.fp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

/* Inverse: scatter a user-provided FPR block back into fp_state.
 * Returns 1 on fault (callers treat nonzero as failure). */
unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	task->thread.fp_state.fpscr = buf[i];

	return 0;
}

/* Copy the upper halves of VSR 0-31 out to userspace. */
unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

/* Restore the upper halves of VSR 0-31 from userspace. */
unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Transactional-memory twins of the helpers above, operating on the
 * checkpointed transact_fp state instead of the live fp_state. */
unsigned long copy_transact_fpr_to_user(void __user *to,
				  struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_TRANS_FPR(i);
	buf[i] = task->thread.transact_fp.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_transact_fpr_from_user(struct task_struct *task,
					  void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_TRANS_FPR(i) = buf[i];
	task->thread.transact_fp.fpscr = buf[i];

	return 0;
}

unsigned long copy_transact_vsx_to_user(void __user *to,
				  struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_transact_vsx_from_user(struct task_struct *task,
					  void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
/* Without VSX the FPRs are already contiguous, so a single bulk
 * copy to/from fp_state.fpr suffices. */
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
			      ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_transact_fpr_to_user(void __user *to,
					 struct task_struct *task)
{
	return __copy_to_user(to, task->thread.transact_fp.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
						 void __user *from)
{
	return __copy_from_user(task->thread.transact_fp.fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
*/ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, struct mcontext __user *tm_frame, int sigret, int ctx_has_vsx_region) { unsigned long msr = regs->msr; /* Make sure floating point registers are stored in regs */ flush_fp_to_thread(current); /* save general registers */ if (save_general_regs(regs, frame)) return 1; #ifdef CONFIG_ALTIVEC /* save altivec registers */ if (current->thread.used_vr) { flush_altivec_to_thread(current); if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state, ELF_NVRREG * sizeof(vector128))) return 1; /* set MSR_VEC in the saved MSR value to indicate that frame->mc_vregs contains valid data */ msr |= MSR_VEC; } /* else assert((regs->msr & MSR_VEC) == 0) */ /* We always copy to/from vrsave, it's 0 if we don't have or don't * use altivec. Since VSCR only contains 32 bits saved in the least * significant bits of a vector, we "cheat" and stuff VRSAVE in the * most significant bits of that same vector. --BenH * Note that the current VRSAVE value is in the SPR at this point. */ if (cpu_has_feature(CPU_FTR_ALTIVEC)) current->thread.vrsave = mfspr(SPRN_VRSAVE); if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) return 1; #endif /* CONFIG_ALTIVEC */ if (copy_fpr_to_user(&frame->mc_fregs, current)) return 1; /* * Clear the MSR VSX bit to indicate there is no valid state attached * to this context, except in the specific case below where we set it. */ msr &= ~MSR_VSX; #ifdef CONFIG_VSX /* * Copy VSR 0-31 upper half from thread_struct to local * buffer, then write that to userspace. 
Also set MSR_VSX in * the saved MSR value to indicate that frame->mc_vregs * contains valid data */ if (current->thread.used_vsr && ctx_has_vsx_region) { __giveup_vsx(current); if (copy_vsx_to_user(&frame->mc_vsregs, current)) return 1; msr |= MSR_VSX; } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* save spe registers */ if (current->thread.used_spe) { flush_spe_to_thread(current); if (__copy_to_user(&frame->mc_vregs, current->thread.evr, ELF_NEVRREG * sizeof(u32))) return 1; /* set MSR_SPE in the saved MSR value to indicate that frame->mc_vregs contains valid data */ msr |= MSR_SPE; } /* else assert((regs->msr & MSR_SPE) == 0) */ /* We always copy to/from spefscr */ if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) return 1; #endif /* CONFIG_SPE */ if (__put_user(msr, &frame->mc_gregs[PT_MSR])) return 1; /* We need to write 0 the MSR top 32 bits in the tm frame so that we * can check it on the restore to see if TM is active */ if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR])) return 1; if (sigret) { /* Set up the sigreturn trampoline: li r0,sigret; sc */ if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) || __put_user(0x44000002UL, &frame->tramp[1])) return 1; flush_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[2]); } return 0; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Save the current user registers on the user stack. * We only save the altivec/spe registers if the process has used * altivec/spe instructions at some point. * We also save the transactional registers to a second ucontext in the * frame. * * See save_user_regs() and signal_64.c:setup_tm_sigcontexts(). */ static int save_tm_user_regs(struct pt_regs *regs, struct mcontext __user *frame, struct mcontext __user *tm_frame, int sigret) { unsigned long msr = regs->msr; /* Remove TM bits from thread's MSR. 
The MSR in the sigcontext * just indicates to userland that we were doing a transaction, but we * don't want to return in transactional state. This also ensures * that flush_fp_to_thread won't set TIF_RESTORE_TM again. */ regs->msr &= ~MSR_TS_MASK; /* Make sure floating point registers are stored in regs */ flush_fp_to_thread(current); /* Save both sets of general registers */ if (save_general_regs(&current->thread.ckpt_regs, frame) || save_general_regs(regs, tm_frame)) return 1; /* Stash the top half of the 64bit MSR into the 32bit MSR word * of the transactional mcontext. This way we have a backward-compatible * MSR in the 'normal' (checkpointed) mcontext and additionally one can * also look at what type of transaction (T or S) was active at the * time of the signal. */ if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR])) return 1; #ifdef CONFIG_ALTIVEC /* save altivec registers */ if (current->thread.used_vr) { flush_altivec_to_thread(current); if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state, ELF_NVRREG * sizeof(vector128))) return 1; if (msr & MSR_VEC) { if (__copy_to_user(&tm_frame->mc_vregs, &current->thread.transact_vr, ELF_NVRREG * sizeof(vector128))) return 1; } else { if (__copy_to_user(&tm_frame->mc_vregs, &current->thread.vr_state, ELF_NVRREG * sizeof(vector128))) return 1; } /* set MSR_VEC in the saved MSR value to indicate that * frame->mc_vregs contains valid data */ msr |= MSR_VEC; } /* We always copy to/from vrsave, it's 0 if we don't have or don't * use altivec. Since VSCR only contains 32 bits saved in the least * significant bits of a vector, we "cheat" and stuff VRSAVE in the * most significant bits of that same vector. 
--BenH */ if (cpu_has_feature(CPU_FTR_ALTIVEC)) current->thread.vrsave = mfspr(SPRN_VRSAVE); if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) return 1; if (msr & MSR_VEC) { if (__put_user(current->thread.transact_vrsave, (u32 __user *)&tm_frame->mc_vregs[32])) return 1; } else { if (__put_user(current->thread.vrsave, (u32 __user *)&tm_frame->mc_vregs[32])) return 1; } #endif /* CONFIG_ALTIVEC */ if (copy_fpr_to_user(&frame->mc_fregs, current)) return 1; if (msr & MSR_FP) { if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current)) return 1; } else { if (copy_fpr_to_user(&tm_frame->mc_fregs, current)) return 1; } #ifdef CONFIG_VSX /* * Copy VSR 0-31 upper half from thread_struct to local * buffer, then write that to userspace. Also set MSR_VSX in * the saved MSR value to indicate that frame->mc_vregs * contains valid data */ if (current->thread.used_vsr) { __giveup_vsx(current); if (copy_vsx_to_user(&frame->mc_vsregs, current)) return 1; if (msr & MSR_VSX) { if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs, current)) return 1; } else { if (copy_vsx_to_user(&tm_frame->mc_vsregs, current)) return 1; } msr |= MSR_VSX; } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* SPE regs are not checkpointed with TM, so this section is * simply the same as in save_user_regs(). 
*/ if (current->thread.used_spe) { flush_spe_to_thread(current); if (__copy_to_user(&frame->mc_vregs, current->thread.evr, ELF_NEVRREG * sizeof(u32))) return 1; /* set MSR_SPE in the saved MSR value to indicate that * frame->mc_vregs contains valid data */ msr |= MSR_SPE; } /* We always copy to/from spefscr */ if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) return 1; #endif /* CONFIG_SPE */ if (__put_user(msr, &frame->mc_gregs[PT_MSR])) return 1; if (sigret) { /* Set up the sigreturn trampoline: li r0,sigret; sc */ if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) || __put_user(0x44000002UL, &frame->tramp[1])) return 1; flush_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[2]); } return 0; } #endif /* * Restore the current user register values from the user stack, * (except for MSR). */ static long restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig) { long err; unsigned int save_r2 = 0; unsigned long msr; #ifdef CONFIG_VSX int i; #endif /* * restore general registers but not including MSR or SOFTE. Also * take care of keeping r2 (TLS) intact if not a signal */ if (!sig) save_r2 = (unsigned int)regs->gpr[2]; err = restore_general_regs(regs, sr); regs->trap = 0; err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); if (!sig) regs->gpr[2] = (unsigned long) save_r2; if (err) return 1; /* if doing signal return, restore the previous little-endian mode */ if (sig) regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); /* * Do this before updating the thread state in * current->thread.fpr/vr/evr. That way, if we get preempted * and another task grabs the FPU/Altivec/SPE, it won't be * tempted to save the current CPU state into the thread_struct * and corrupt what we are writing there. 
*/ discard_lazy_cpu_state(); #ifdef CONFIG_ALTIVEC /* * Force the process to reload the altivec registers from * current->thread when it next does altivec instructions */ regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { /* restore altivec registers from the stack */ if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, sizeof(sr->mc_vregs))) return 1; } else if (current->thread.used_vr) memset(&current->thread.vr_state, 0, ELF_NVRREG * sizeof(vector128)); /* Always get VRSAVE back */ if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) return 1; if (cpu_has_feature(CPU_FTR_ALTIVEC)) mtspr(SPRN_VRSAVE, current->thread.vrsave); #endif /* CONFIG_ALTIVEC */ if (copy_fpr_from_user(current, &sr->mc_fregs)) return 1; #ifdef CONFIG_VSX /* * Force the process to reload the VSX registers from * current->thread when it next does VSX instruction. */ regs->msr &= ~MSR_VSX; if (msr & MSR_VSX) { /* * Restore altivec registers from the stack to a local * buffer, then write this out to the thread_struct */ if (copy_vsx_from_user(current, &sr->mc_vsregs)) return 1; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; #endif /* CONFIG_VSX */ /* * force the process to reload the FP registers from * current->thread when it next does FP instructions */ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); #ifdef CONFIG_SPE /* force the process to reload the spe registers from current->thread when it next does spe instructions */ regs->msr &= ~MSR_SPE; if (msr & MSR_SPE) { /* restore spe registers from the stack */ if (__copy_from_user(current->thread.evr, &sr->mc_vregs, ELF_NEVRREG * sizeof(u32))) return 1; } else if (current->thread.used_spe) memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); /* Always get SPEFSCR back */ if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG)) return 1; #endif /* CONFIG_SPE */ return 0; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Restore the 
current user register values from the user stack, except for * MSR, and recheckpoint the original checkpointed register state for processes * in transactions. */ static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr, struct mcontext __user *tm_sr) { long err; unsigned long msr, msr_hi; #ifdef CONFIG_VSX int i; #endif /* * restore general registers but not including MSR or SOFTE. Also * take care of keeping r2 (TLS) intact if not a signal. * See comment in signal_64.c:restore_tm_sigcontexts(); * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR * were set by the signal delivery. */ err = restore_general_regs(regs, tm_sr); err |= restore_general_regs(&current->thread.ckpt_regs, sr); err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]); err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); if (err) return 1; /* Restore the previous little-endian mode */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); /* * Do this before updating the thread state in * current->thread.fpr/vr/evr. That way, if we get preempted * and another task grabs the FPU/Altivec/SPE, it won't be * tempted to save the current CPU state into the thread_struct * and corrupt what we are writing there. 
*/ discard_lazy_cpu_state(); #ifdef CONFIG_ALTIVEC regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { /* restore altivec registers from the stack */ if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, sizeof(sr->mc_vregs)) || __copy_from_user(&current->thread.transact_vr, &tm_sr->mc_vregs, sizeof(sr->mc_vregs))) return 1; } else if (current->thread.used_vr) { memset(&current->thread.vr_state, 0, ELF_NVRREG * sizeof(vector128)); memset(&current->thread.transact_vr, 0, ELF_NVRREG * sizeof(vector128)); } /* Always get VRSAVE back */ if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]) || __get_user(current->thread.transact_vrsave, (u32 __user *)&tm_sr->mc_vregs[32])) return 1; if (cpu_has_feature(CPU_FTR_ALTIVEC)) mtspr(SPRN_VRSAVE, current->thread.vrsave); #endif /* CONFIG_ALTIVEC */ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); if (copy_fpr_from_user(current, &sr->mc_fregs) || copy_transact_fpr_from_user(current, &tm_sr->mc_fregs)) return 1; #ifdef CONFIG_VSX regs->msr &= ~MSR_VSX; if (msr & MSR_VSX) { /* * Restore altivec registers from the stack to a local * buffer, then write this out to the thread_struct */ if (copy_vsx_from_user(current, &sr->mc_vsregs) || copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs)) return 1; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) { current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* SPE regs are not checkpointed with TM, so this section is * simply the same as in restore_user_regs(). 
*/ regs->msr &= ~MSR_SPE; if (msr & MSR_SPE) { if (__copy_from_user(current->thread.evr, &sr->mc_vregs, ELF_NEVRREG * sizeof(u32))) return 1; } else if (current->thread.used_spe) memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); /* Always get SPEFSCR back */ if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG)) return 1; #endif /* CONFIG_SPE */ /* Now, recheckpoint. This loads up all of the checkpointed (older) * registers, including FP and V[S]Rs. After recheckpointing, the * transactional versions should be loaded. */ tm_enable(); /* Make sure the transaction is marked as failed */ current->thread.tm_texasr |= TEXASR_FS; /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&current->thread, msr); /* Get the top half of the MSR */ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) return 1; /* Pull in MSR TM from user context */ regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK); /* This loads the speculative FP/VEC state, if used */ if (msr & MSR_FP) { do_load_up_transact_fpu(&current->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } #ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&current->thread); regs->msr |= MSR_VEC; } #endif return 0; } #endif #ifdef CONFIG_PPC64 int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s) { int err; if (!access_ok (VERIFY_WRITE, d, sizeof(*d))) return -EFAULT; /* If you change siginfo_t structure, please be sure * this code is fixed accordingly. * It should never copy any pad contained in the structure * to avoid security leaks, but must copy the generic * 3 ints plus the relevant union member. * This routine must convert siginfo from 64bit to 32bit as well * at the same time. 
*/ err = __put_user(s->si_signo, &d->si_signo); err |= __put_user(s->si_errno, &d->si_errno); err |= __put_user((short)s->si_code, &d->si_code); if (s->si_code < 0) err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad, SI_PAD_SIZE32); else switch(s->si_code >> 16) { case __SI_CHLD >> 16: err |= __put_user(s->si_pid, &d->si_pid); err |= __put_user(s->si_uid, &d->si_uid); err |= __put_user(s->si_utime, &d->si_utime); err |= __put_user(s->si_stime, &d->si_stime); err |= __put_user(s->si_status, &d->si_status); break; case __SI_FAULT >> 16: err |= __put_user((unsigned int)(unsigned long)s->si_addr, &d->si_addr); break; case __SI_POLL >> 16: err |= __put_user(s->si_band, &d->si_band); err |= __put_user(s->si_fd, &d->si_fd); break; case __SI_TIMER >> 16: err |= __put_user(s->si_tid, &d->si_tid); err |= __put_user(s->si_overrun, &d->si_overrun); err |= __put_user(s->si_int, &d->si_int); break; case __SI_SYS >> 16: err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr); err |= __put_user(s->si_syscall, &d->si_syscall); err |= __put_user(s->si_arch, &d->si_arch); break; case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ case __SI_MESGQ >> 16: err |= __put_user(s->si_int, &d->si_int); /* fallthrough */ case __SI_KILL >> 16: default: err |= __put_user(s->si_pid, &d->si_pid); err |= __put_user(s->si_uid, &d->si_uid); break; } return err; } #define copy_siginfo_to_user copy_siginfo_to_user32 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) { if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) return -EFAULT; return 0; } #endif /* CONFIG_PPC64 */ /* * Set up a signal frame for a "real-time" signal handler * (one which gets siginfo). 
*/ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs) { struct rt_sigframe __user *rt_sf; struct mcontext __user *frame; struct mcontext __user *tm_frame = NULL; void __user *addr; unsigned long newsp = 0; int sigret; unsigned long tramp; /* Set up Signal Frame */ /* Put a Real Time Context onto stack */ rt_sf = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*rt_sf), 1); addr = rt_sf; if (unlikely(rt_sf == NULL)) goto badframe; /* Put the siginfo & fill in most of the ucontext */ if (copy_siginfo_to_user(&rt_sf->info, &ksig->info) || __put_user(0, &rt_sf->uc.uc_flags) || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1]) || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext), &rt_sf->uc.uc_regs) || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset)) goto badframe; /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; addr = frame; if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { sigret = 0; tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp; } else { sigret = __NR_rt_sigreturn; tramp = (unsigned long) frame->tramp; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_frame = &rt_sf->uc_transact.uc_mcontext; if (MSR_TM_ACTIVE(regs->msr)) { if (__put_user((unsigned long)&rt_sf->uc_transact, &rt_sf->uc.uc_link) || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs)) goto badframe; if (save_tm_user_regs(regs, frame, tm_frame, sigret)) goto badframe; } else #endif { if (__put_user(0, &rt_sf->uc.uc_link)) goto badframe; if (save_user_regs(regs, frame, tm_frame, sigret, 1)) goto badframe; } regs->link = tramp; current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ /* create a stack frame for the caller of the handler */ newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16); addr = (void __user *)regs->gpr[1]; if (put_user(regs->gpr[1], (u32 __user *)newsp)) goto badframe; /* Fill registers for signal handler */ regs->gpr[1] = newsp; regs->gpr[3] = ksig->sig; regs->gpr[4] = (unsigned long) 
&rt_sf->info; regs->gpr[5] = (unsigned long) &rt_sf->uc; regs->gpr[6] = (unsigned long) rt_sf; regs->nip = (unsigned long) ksig->ka.sa.sa_handler; /* enter the signal handler in native-endian mode */ regs->msr &= ~MSR_LE; regs->msr |= (MSR_KERNEL & MSR_LE); return 0; badframe: if (show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, addr, regs->nip, regs->link); return 1; } static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig) { sigset_t set; struct mcontext __user *mcp; if (get_sigset_t(&set, &ucp->uc_sigmask)) return -EFAULT; #ifdef CONFIG_PPC64 { u32 cmcp; if (__get_user(cmcp, &ucp->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; /* no need to check access_ok(mcp), since mcp < 4GB */ } #else if (__get_user(mcp, &ucp->uc_regs)) return -EFAULT; if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp))) return -EFAULT; #endif set_current_blocked(&set); if (restore_user_regs(regs, mcp, sig)) return -EFAULT; return 0; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static int do_setcontext_tm(struct ucontext __user *ucp, struct ucontext __user *tm_ucp, struct pt_regs *regs) { sigset_t set; struct mcontext __user *mcp; struct mcontext __user *tm_mcp; u32 cmcp; u32 tm_cmcp; if (get_sigset_t(&set, &ucp->uc_sigmask)) return -EFAULT; if (__get_user(cmcp, &ucp->uc_regs) || __get_user(tm_cmcp, &tm_ucp->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; tm_mcp = (struct mcontext __user *)(u64)tm_cmcp; /* no need to check access_ok(mcp), since mcp < 4GB */ set_current_blocked(&set); if (restore_tm_user_regs(regs, mcp, tm_mcp)) return -EFAULT; return 0; } #endif long sys_swapcontext(struct ucontext __user *old_ctx, struct ucontext __user *new_ctx, int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) { unsigned char tmp; int ctx_has_vsx_region = 0; #ifdef CONFIG_PPC64 unsigned long new_msr = 0; if (new_ctx) { struct mcontext 
__user *mcp; u32 cmcp; /* * Get pointer to the real mcontext. No need for * access_ok since we are dealing with compat * pointers. */ if (__get_user(cmcp, &new_ctx->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR])) return -EFAULT; } /* * Check that the context is not smaller than the original * size (with VMX but without VSX) */ if (ctx_size < UCONTEXTSIZEWITHOUTVSX) return -EINVAL; /* * If the new context state sets the MSR VSX bits but * it doesn't provide VSX state. */ if ((ctx_size < sizeof(struct ucontext)) && (new_msr & MSR_VSX)) return -EINVAL; /* Does the context have enough room to store VSX data? */ if (ctx_size >= sizeof(struct ucontext)) ctx_has_vsx_region = 1; #else /* Context size is for future use. Right now, we only make sure * we are passed something we understand */ if (ctx_size < sizeof(struct ucontext)) return -EINVAL; #endif if (old_ctx != NULL) { struct mcontext __user *mctx; /* * old_ctx might not be 16-byte aligned, in which * case old_ctx->uc_mcontext won't be either. * Because we have the old_ctx->uc_pad2 field * before old_ctx->uc_mcontext, we need to round down * from &old_ctx->uc_mcontext to a 16-byte boundary. */ mctx = (struct mcontext __user *) ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL); if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region) || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked) || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs)) return -EFAULT; } if (new_ctx == NULL) return 0; if (!access_ok(VERIFY_READ, new_ctx, ctx_size) || __get_user(tmp, (u8 __user *) new_ctx) || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) return -EFAULT; /* * If we get a fault copying the context into the kernel's * image of the user's registers, we can't just return -EFAULT * because the user's registers will be corrupted. 
For instance * the NIP value may have been updated but not some of the * other registers. Given that we have done the access_ok * and successfully read the first and last bytes of the region * above, this should only happen in an out-of-memory situation * or if another thread unmaps the region containing the context. * We kill the task with a SIGSEGV in this situation. */ if (do_setcontext(new_ctx, regs, 0)) do_exit(SIGSEGV); set_thread_flag(TIF_RESTOREALL); return 0; } long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, struct pt_regs *regs) { struct rt_sigframe __user *rt_sf; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM struct ucontext __user *uc_transact; unsigned long msr_hi; unsigned long tmp; int tm_restore = 0; #endif /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; rt_sf = (struct rt_sigframe __user *) (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16); if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) goto bad; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (__get_user(tmp, &rt_sf->uc.uc_link)) goto bad; uc_transact = (struct ucontext __user *)(uintptr_t)tmp; if (uc_transact) { u32 cmcp; struct mcontext __user *mcp; if (__get_user(cmcp, &uc_transact->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; /* The top 32 bits of the MSR are stashed in the transactional * ucontext. */ if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR])) goto bad; if (MSR_TM_ACTIVE(msr_hi<<32)) { /* We only recheckpoint on return if we're * transaction. */ tm_restore = 1; if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs)) goto bad; } } if (!tm_restore) /* Fall through, for non-TM restore */ #endif if (do_setcontext(&rt_sf->uc, regs, 1)) goto bad; /* * It's not clear whether or why it is desirable to save the * sigaltstack setting on signal delivery and restore it on * signal return. But other architectures do this and we have * always done it up until now so it is probably better not to * change it. 
-- paulus */ #ifdef CONFIG_PPC64 if (compat_restore_altstack(&rt_sf->uc.uc_stack)) goto bad; #else if (restore_altstack(&rt_sf->uc.uc_stack)) goto bad; #endif set_thread_flag(TIF_RESTOREALL); return 0; bad: if (show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, rt_sf, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; } #ifdef CONFIG_PPC32 int sys_debug_setcontext(struct ucontext __user *ctx, int ndbg, struct sig_dbg_op __user *dbg, int r6, int r7, int r8, struct pt_regs *regs) { struct sig_dbg_op op; int i; unsigned char tmp; unsigned long new_msr = regs->msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS unsigned long new_dbcr0 = current->thread.debug.dbcr0; #endif for (i=0; i<ndbg; i++) { if (copy_from_user(&op, dbg + i, sizeof(op))) return -EFAULT; switch (op.dbg_type) { case SIG_DBG_SINGLE_STEPPING: #ifdef CONFIG_PPC_ADV_DEBUG_REGS if (op.dbg_value) { new_msr |= MSR_DE; new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); } else { new_dbcr0 &= ~DBCR0_IC; if (!DBCR_ACTIVE_EVENTS(new_dbcr0, current->thread.debug.dbcr1)) { new_msr &= ~MSR_DE; new_dbcr0 &= ~DBCR0_IDM; } } #else if (op.dbg_value) new_msr |= MSR_SE; else new_msr &= ~MSR_SE; #endif break; case SIG_DBG_BRANCH_TRACING: #ifdef CONFIG_PPC_ADV_DEBUG_REGS return -EINVAL; #else if (op.dbg_value) new_msr |= MSR_BE; else new_msr &= ~MSR_BE; #endif break; default: return -EINVAL; } } /* We wait until here to actually install the values in the registers so if we fail in the above loop, it will not affect the contents of these registers. After this point, failure is a problem, anyway, and it's very unlikely unless the user is really doing something wrong. 
*/ regs->msr = new_msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS current->thread.debug.dbcr0 = new_dbcr0; #endif if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) || __get_user(tmp, (u8 __user *) ctx) || __get_user(tmp, (u8 __user *) (ctx + 1) - 1)) return -EFAULT; /* * If we get a fault copying the context into the kernel's * image of the user's registers, we can't just return -EFAULT * because the user's registers will be corrupted. For instance * the NIP value may have been updated but not some of the * other registers. Given that we have done the access_ok * and successfully read the first and last bytes of the region * above, this should only happen in an out-of-memory situation * or if another thread unmaps the region containing the context. * We kill the task with a SIGSEGV in this situation. */ if (do_setcontext(ctx, regs, 1)) { if (show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in " "sys_debug_setcontext: %p nip %08lx " "lr %08lx\n", current->comm, current->pid, ctx, regs->nip, regs->link); force_sig(SIGSEGV, current); goto out; } /* * It's not clear whether or why it is desirable to save the * sigaltstack setting on signal delivery and restore it on * signal return. But other architectures do this and we have * always done it up until now so it is probably better not to * change it. 
-- paulus */ restore_altstack(&ctx->uc_stack); set_thread_flag(TIF_RESTOREALL); out: return 0; } #endif /* * OK, we're invoking a handler */ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs) { struct sigcontext __user *sc; struct sigframe __user *frame; struct mcontext __user *tm_mctx = NULL; unsigned long newsp = 0; int sigret; unsigned long tramp; /* Set up Signal Frame */ frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 1); if (unlikely(frame == NULL)) goto badframe; sc = (struct sigcontext __user *) &frame->sctx; #if _NSIG != 64 #error "Please adjust handle_signal()" #endif if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler) || __put_user(oldset->sig[0], &sc->oldmask) #ifdef CONFIG_PPC64 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3]) #else || __put_user(oldset->sig[1], &sc->_unused[3]) #endif || __put_user(to_user_ptr(&frame->mctx), &sc->regs) || __put_user(ksig->sig, &sc->signal)) goto badframe; if (vdso32_sigtramp && current->mm->context.vdso_base) { sigret = 0; tramp = current->mm->context.vdso_base + vdso32_sigtramp; } else { sigret = __NR_sigreturn; tramp = (unsigned long) frame->mctx.tramp; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_mctx = &frame->mctx_transact; if (MSR_TM_ACTIVE(regs->msr)) { if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact, sigret)) goto badframe; } else #endif { if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1)) goto badframe; } regs->link = tramp; current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ /* create a stack frame for the caller of the handler */ newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE; if (put_user(regs->gpr[1], (u32 __user *)newsp)) goto badframe; regs->gpr[1] = newsp; regs->gpr[3] = ksig->sig; regs->gpr[4] = (unsigned long) sc; regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler; /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; return 0; badframe: if 
(show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in handle_signal32: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, frame, regs->nip, regs->link); return 1; } /* * Do a signal return; undo the signal stack. */ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, struct pt_regs *regs) { struct sigframe __user *sf; struct sigcontext __user *sc; struct sigcontext sigctx; struct mcontext __user *sr; void __user *addr; sigset_t set; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM struct mcontext __user *mcp, *tm_mcp; unsigned long msr_hi; #endif /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); sc = &sf->sctx; addr = sc; if (copy_from_user(&sigctx, sc, sizeof(sigctx))) goto badframe; #ifdef CONFIG_PPC64 /* * Note that PPC32 puts the upper 32 bits of the sigmask in the * unused part of the signal stackframe */ set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32); #else set.sig[0] = sigctx.oldmask; set.sig[1] = sigctx._unused[3]; #endif set_current_blocked(&set); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM mcp = (struct mcontext __user *)&sf->mctx; tm_mcp = (struct mcontext __user *)&sf->mctx_transact; if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR])) goto badframe; if (MSR_TM_ACTIVE(msr_hi<<32)) { if (!cpu_has_feature(CPU_FTR_TM)) goto badframe; if (restore_tm_user_regs(regs, mcp, tm_mcp)) goto badframe; } else #endif { sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); addr = sr; if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) || restore_user_regs(regs, sr, 1)) goto badframe; } set_thread_flag(TIF_RESTOREALL); return 0; badframe: if (show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, addr, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_1821_1
crossvul-cpp_data_good_3371_0
/*** This file is part of systemd. Copyright 2014 Lennart Poettering systemd is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. systemd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with systemd; If not, see <http://www.gnu.org/licenses/>. ***/ #include "alloc-util.h" #include "dns-domain.h" #include "resolved-dns-packet.h" #include "string-table.h" #include "strv.h" #include "unaligned.h" #include "utf8.h" #include "util.h" #define EDNS0_OPT_DO (1<<15) typedef struct DnsPacketRewinder { DnsPacket *packet; size_t saved_rindex; } DnsPacketRewinder; static void rewind_dns_packet(DnsPacketRewinder *rewinder) { if (rewinder->packet) dns_packet_rewind(rewinder->packet, rewinder->saved_rindex); } #define INIT_REWINDER(rewinder, p) do { rewinder.packet = p; rewinder.saved_rindex = p->rindex; } while (0) #define CANCEL_REWINDER(rewinder) do { rewinder.packet = NULL; } while (0) int dns_packet_new(DnsPacket **ret, DnsProtocol protocol, size_t mtu) { DnsPacket *p; size_t a; assert(ret); if (mtu <= UDP_PACKET_HEADER_SIZE) a = DNS_PACKET_SIZE_START; else a = mtu - UDP_PACKET_HEADER_SIZE; if (a < DNS_PACKET_HEADER_SIZE) a = DNS_PACKET_HEADER_SIZE; /* round up to next page size */ a = PAGE_ALIGN(ALIGN(sizeof(DnsPacket)) + a) - ALIGN(sizeof(DnsPacket)); /* make sure we never allocate more than useful */ if (a > DNS_PACKET_SIZE_MAX) a = DNS_PACKET_SIZE_MAX; p = malloc0(ALIGN(sizeof(DnsPacket)) + a); if (!p) return -ENOMEM; p->size = p->rindex = DNS_PACKET_HEADER_SIZE; p->allocated = a; p->protocol = protocol; p->opt_start = p->opt_size = 
(size_t) -1; p->n_ref = 1; *ret = p; return 0; } void dns_packet_set_flags(DnsPacket *p, bool dnssec_checking_disabled, bool truncated) { DnsPacketHeader *h; assert(p); h = DNS_PACKET_HEADER(p); switch(p->protocol) { case DNS_PROTOCOL_LLMNR: assert(!truncated); h->flags = htobe16(DNS_PACKET_MAKE_FLAGS(0 /* qr */, 0 /* opcode */, 0 /* c */, 0 /* tc */, 0 /* t */, 0 /* ra */, 0 /* ad */, 0 /* cd */, 0 /* rcode */)); break; case DNS_PROTOCOL_MDNS: h->flags = htobe16(DNS_PACKET_MAKE_FLAGS(0 /* qr */, 0 /* opcode */, 0 /* aa */, truncated /* tc */, 0 /* rd (ask for recursion) */, 0 /* ra */, 0 /* ad */, 0 /* cd */, 0 /* rcode */)); break; default: assert(!truncated); h->flags = htobe16(DNS_PACKET_MAKE_FLAGS(0 /* qr */, 0 /* opcode */, 0 /* aa */, 0 /* tc */, 1 /* rd (ask for recursion) */, 0 /* ra */, 0 /* ad */, dnssec_checking_disabled /* cd */, 0 /* rcode */)); } } int dns_packet_new_query(DnsPacket **ret, DnsProtocol protocol, size_t mtu, bool dnssec_checking_disabled) { DnsPacket *p; int r; assert(ret); r = dns_packet_new(&p, protocol, mtu); if (r < 0) return r; /* Always set the TC bit to 0 initially. * If there are multiple packets later, we'll update the bit shortly before sending. 
*/ dns_packet_set_flags(p, dnssec_checking_disabled, false); *ret = p; return 0; } DnsPacket *dns_packet_ref(DnsPacket *p) { if (!p) return NULL; assert(!p->on_stack); assert(p->n_ref > 0); p->n_ref++; return p; } static void dns_packet_free(DnsPacket *p) { char *s; assert(p); dns_question_unref(p->question); dns_answer_unref(p->answer); dns_resource_record_unref(p->opt); while ((s = hashmap_steal_first_key(p->names))) free(s); hashmap_free(p->names); free(p->_data); if (!p->on_stack) free(p); } DnsPacket *dns_packet_unref(DnsPacket *p) { if (!p) return NULL; assert(p->n_ref > 0); dns_packet_unref(p->more); if (p->n_ref == 1) dns_packet_free(p); else p->n_ref--; return NULL; } int dns_packet_validate(DnsPacket *p) { assert(p); if (p->size < DNS_PACKET_HEADER_SIZE) return -EBADMSG; if (p->size > DNS_PACKET_SIZE_MAX) return -EBADMSG; return 1; } int dns_packet_validate_reply(DnsPacket *p) { int r; assert(p); r = dns_packet_validate(p); if (r < 0) return r; if (DNS_PACKET_QR(p) != 1) return 0; if (DNS_PACKET_OPCODE(p) != 0) return -EBADMSG; switch (p->protocol) { case DNS_PROTOCOL_LLMNR: /* RFC 4795, Section 2.1.1. says to discard all replies with QDCOUNT != 1 */ if (DNS_PACKET_QDCOUNT(p) != 1) return -EBADMSG; break; case DNS_PROTOCOL_MDNS: /* RFC 6762, Section 18 */ if (DNS_PACKET_RCODE(p) != 0) return -EBADMSG; break; default: break; } return 1; } int dns_packet_validate_query(DnsPacket *p) { int r; assert(p); r = dns_packet_validate(p); if (r < 0) return r; if (DNS_PACKET_QR(p) != 0) return 0; if (DNS_PACKET_OPCODE(p) != 0) return -EBADMSG; if (DNS_PACKET_TC(p)) return -EBADMSG; switch (p->protocol) { case DNS_PROTOCOL_LLMNR: case DNS_PROTOCOL_DNS: /* RFC 4795, Section 2.1.1. says to discard all queries with QDCOUNT != 1 */ if (DNS_PACKET_QDCOUNT(p) != 1) return -EBADMSG; /* RFC 4795, Section 2.1.1. says to discard all queries with ANCOUNT != 0 */ if (DNS_PACKET_ANCOUNT(p) > 0) return -EBADMSG; /* RFC 4795, Section 2.1.1. 
says to discard all queries with NSCOUNT != 0 */ if (DNS_PACKET_NSCOUNT(p) > 0) return -EBADMSG; break; case DNS_PROTOCOL_MDNS: /* RFC 6762, Section 18 */ if (DNS_PACKET_AA(p) != 0 || DNS_PACKET_RD(p) != 0 || DNS_PACKET_RA(p) != 0 || DNS_PACKET_AD(p) != 0 || DNS_PACKET_CD(p) != 0 || DNS_PACKET_RCODE(p) != 0) return -EBADMSG; break; default: break; } return 1; } static int dns_packet_extend(DnsPacket *p, size_t add, void **ret, size_t *start) { assert(p); if (p->size + add > p->allocated) { size_t a; a = PAGE_ALIGN((p->size + add) * 2); if (a > DNS_PACKET_SIZE_MAX) a = DNS_PACKET_SIZE_MAX; if (p->size + add > a) return -EMSGSIZE; if (p->_data) { void *d; d = realloc(p->_data, a); if (!d) return -ENOMEM; p->_data = d; } else { p->_data = malloc(a); if (!p->_data) return -ENOMEM; memcpy(p->_data, (uint8_t*) p + ALIGN(sizeof(DnsPacket)), p->size); memzero((uint8_t*) p->_data + p->size, a - p->size); } p->allocated = a; } if (start) *start = p->size; if (ret) *ret = (uint8_t*) DNS_PACKET_DATA(p) + p->size; p->size += add; return 0; } void dns_packet_truncate(DnsPacket *p, size_t sz) { Iterator i; char *s; void *n; assert(p); if (p->size <= sz) return; HASHMAP_FOREACH_KEY(n, s, p->names, i) { if (PTR_TO_SIZE(n) < sz) continue; hashmap_remove(p->names, s); free(s); } p->size = sz; } int dns_packet_append_blob(DnsPacket *p, const void *d, size_t l, size_t *start) { void *q; int r; assert(p); r = dns_packet_extend(p, l, &q, start); if (r < 0) return r; memcpy(q, d, l); return 0; } int dns_packet_append_uint8(DnsPacket *p, uint8_t v, size_t *start) { void *d; int r; assert(p); r = dns_packet_extend(p, sizeof(uint8_t), &d, start); if (r < 0) return r; ((uint8_t*) d)[0] = v; return 0; } int dns_packet_append_uint16(DnsPacket *p, uint16_t v, size_t *start) { void *d; int r; assert(p); r = dns_packet_extend(p, sizeof(uint16_t), &d, start); if (r < 0) return r; unaligned_write_be16(d, v); return 0; } int dns_packet_append_uint32(DnsPacket *p, uint32_t v, size_t *start) { void *d; 
int r; assert(p); r = dns_packet_extend(p, sizeof(uint32_t), &d, start); if (r < 0) return r; unaligned_write_be32(d, v); return 0; } int dns_packet_append_string(DnsPacket *p, const char *s, size_t *start) { assert(p); assert(s); return dns_packet_append_raw_string(p, s, strlen(s), start); } int dns_packet_append_raw_string(DnsPacket *p, const void *s, size_t size, size_t *start) { void *d; int r; assert(p); assert(s || size == 0); if (size > 255) return -E2BIG; r = dns_packet_extend(p, 1 + size, &d, start); if (r < 0) return r; ((uint8_t*) d)[0] = (uint8_t) size; memcpy_safe(((uint8_t*) d) + 1, s, size); return 0; } int dns_packet_append_label(DnsPacket *p, const char *d, size_t l, bool canonical_candidate, size_t *start) { uint8_t *w; int r; /* Append a label to a packet. Optionally, does this in DNSSEC * canonical form, if this label is marked as a candidate for * it, and the canonical form logic is enabled for the * packet */ assert(p); assert(d); if (l > DNS_LABEL_MAX) return -E2BIG; r = dns_packet_extend(p, 1 + l, (void**) &w, start); if (r < 0) return r; *(w++) = (uint8_t) l; if (p->canonical_form && canonical_candidate) { size_t i; /* Generate in canonical form, as defined by DNSSEC * RFC 4034, Section 6.2, i.e. all lower-case. */ for (i = 0; i < l; i++) w[i] = (uint8_t) ascii_tolower(d[i]); } else /* Otherwise, just copy the string unaltered. This is * essential for DNS-SD, where the casing of labels * matters and needs to be retained. 
*/ memcpy(w, d, l); return 0; } int dns_packet_append_name( DnsPacket *p, const char *name, bool allow_compression, bool canonical_candidate, size_t *start) { size_t saved_size; int r; assert(p); assert(name); if (p->refuse_compression) allow_compression = false; saved_size = p->size; while (!dns_name_is_root(name)) { const char *z = name; char label[DNS_LABEL_MAX]; size_t n = 0; if (allow_compression) n = PTR_TO_SIZE(hashmap_get(p->names, name)); if (n > 0) { assert(n < p->size); if (n < 0x4000) { r = dns_packet_append_uint16(p, 0xC000 | n, NULL); if (r < 0) goto fail; goto done; } } r = dns_label_unescape(&name, label, sizeof(label)); if (r < 0) goto fail; r = dns_packet_append_label(p, label, r, canonical_candidate, &n); if (r < 0) goto fail; if (allow_compression) { _cleanup_free_ char *s = NULL; s = strdup(z); if (!s) { r = -ENOMEM; goto fail; } r = hashmap_ensure_allocated(&p->names, &dns_name_hash_ops); if (r < 0) goto fail; r = hashmap_put(p->names, s, SIZE_TO_PTR(n)); if (r < 0) goto fail; s = NULL; } } r = dns_packet_append_uint8(p, 0, NULL); if (r < 0) return r; done: if (start) *start = saved_size; return 0; fail: dns_packet_truncate(p, saved_size); return r; } int dns_packet_append_key(DnsPacket *p, const DnsResourceKey *k, const DnsAnswerFlags flags, size_t *start) { size_t saved_size; uint16_t class; int r; assert(p); assert(k); saved_size = p->size; r = dns_packet_append_name(p, dns_resource_key_name(k), true, true, NULL); if (r < 0) goto fail; r = dns_packet_append_uint16(p, k->type, NULL); if (r < 0) goto fail; class = flags & DNS_ANSWER_CACHE_FLUSH ? 
k->class | MDNS_RR_CACHE_FLUSH : k->class; r = dns_packet_append_uint16(p, class, NULL); if (r < 0) goto fail; if (start) *start = saved_size; return 0; fail: dns_packet_truncate(p, saved_size); return r; } static int dns_packet_append_type_window(DnsPacket *p, uint8_t window, uint8_t length, const uint8_t *types, size_t *start) { size_t saved_size; int r; assert(p); assert(types); assert(length > 0); saved_size = p->size; r = dns_packet_append_uint8(p, window, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, length, NULL); if (r < 0) goto fail; r = dns_packet_append_blob(p, types, length, NULL); if (r < 0) goto fail; if (start) *start = saved_size; return 0; fail: dns_packet_truncate(p, saved_size); return r; } static int dns_packet_append_types(DnsPacket *p, Bitmap *types, size_t *start) { Iterator i; uint8_t window = 0; uint8_t entry = 0; uint8_t bitmaps[32] = {}; unsigned n; size_t saved_size; int r; assert(p); saved_size = p->size; BITMAP_FOREACH(n, types, i) { assert(n <= 0xffff); if ((n >> 8) != window && bitmaps[entry / 8] != 0) { r = dns_packet_append_type_window(p, window, entry / 8 + 1, bitmaps, NULL); if (r < 0) goto fail; zero(bitmaps); } window = n >> 8; entry = n & 255; bitmaps[entry / 8] |= 1 << (7 - (entry % 8)); } if (bitmaps[entry / 8] != 0) { r = dns_packet_append_type_window(p, window, entry / 8 + 1, bitmaps, NULL); if (r < 0) goto fail; } if (start) *start = saved_size; return 0; fail: dns_packet_truncate(p, saved_size); return r; } /* Append the OPT pseudo-RR described in RFC6891 */ int dns_packet_append_opt(DnsPacket *p, uint16_t max_udp_size, bool edns0_do, int rcode, size_t *start) { size_t saved_size; int r; assert(p); /* we must never advertise supported packet size smaller than the legacy max */ assert(max_udp_size >= DNS_PACKET_UNICAST_SIZE_MAX); assert(rcode >= 0); assert(rcode <= _DNS_RCODE_MAX); if (p->opt_start != (size_t) -1) return -EBUSY; assert(p->opt_size == (size_t) -1); saved_size = p->size; /* empty name */ r = 
dns_packet_append_uint8(p, 0, NULL); if (r < 0) return r; /* type */ r = dns_packet_append_uint16(p, DNS_TYPE_OPT, NULL); if (r < 0) goto fail; /* class: maximum udp packet that can be received */ r = dns_packet_append_uint16(p, max_udp_size, NULL); if (r < 0) goto fail; /* extended RCODE and VERSION */ r = dns_packet_append_uint16(p, ((uint16_t) rcode & 0x0FF0) << 4, NULL); if (r < 0) goto fail; /* flags: DNSSEC OK (DO), see RFC3225 */ r = dns_packet_append_uint16(p, edns0_do ? EDNS0_OPT_DO : 0, NULL); if (r < 0) goto fail; /* RDLENGTH */ if (edns0_do && !DNS_PACKET_QR(p)) { /* If DO is on and this is not a reply, also append RFC6975 Algorithm data */ static const uint8_t rfc6975[] = { 0, 5, /* OPTION_CODE: DAU */ 0, 6, /* LIST_LENGTH */ DNSSEC_ALGORITHM_RSASHA1, DNSSEC_ALGORITHM_RSASHA1_NSEC3_SHA1, DNSSEC_ALGORITHM_RSASHA256, DNSSEC_ALGORITHM_RSASHA512, DNSSEC_ALGORITHM_ECDSAP256SHA256, DNSSEC_ALGORITHM_ECDSAP384SHA384, 0, 6, /* OPTION_CODE: DHU */ 0, 3, /* LIST_LENGTH */ DNSSEC_DIGEST_SHA1, DNSSEC_DIGEST_SHA256, DNSSEC_DIGEST_SHA384, 0, 7, /* OPTION_CODE: N3U */ 0, 1, /* LIST_LENGTH */ NSEC3_ALGORITHM_SHA1, }; r = dns_packet_append_uint16(p, sizeof(rfc6975), NULL); if (r < 0) goto fail; r = dns_packet_append_blob(p, rfc6975, sizeof(rfc6975), NULL); } else r = dns_packet_append_uint16(p, 0, NULL); if (r < 0) goto fail; DNS_PACKET_HEADER(p)->arcount = htobe16(DNS_PACKET_ARCOUNT(p) + 1); p->opt_start = saved_size; p->opt_size = p->size - saved_size; if (start) *start = saved_size; return 0; fail: dns_packet_truncate(p, saved_size); return r; } int dns_packet_truncate_opt(DnsPacket *p) { assert(p); if (p->opt_start == (size_t) -1) { assert(p->opt_size == (size_t) -1); return 0; } assert(p->opt_size != (size_t) -1); assert(DNS_PACKET_ARCOUNT(p) > 0); if (p->opt_start + p->opt_size != p->size) return -EBUSY; dns_packet_truncate(p, p->opt_start); DNS_PACKET_HEADER(p)->arcount = htobe16(DNS_PACKET_ARCOUNT(p) - 1); p->opt_start = p->opt_size = (size_t) -1; return 1; } 
int dns_packet_append_rr(DnsPacket *p, const DnsResourceRecord *rr, const DnsAnswerFlags flags, size_t *start, size_t *rdata_start) { size_t saved_size, rdlength_offset, end, rdlength, rds; uint32_t ttl; int r; assert(p); assert(rr); saved_size = p->size; r = dns_packet_append_key(p, rr->key, flags, NULL); if (r < 0) goto fail; ttl = flags & DNS_ANSWER_GOODBYE ? 0 : rr->ttl; r = dns_packet_append_uint32(p, ttl, NULL); if (r < 0) goto fail; /* Initially we write 0 here */ r = dns_packet_append_uint16(p, 0, &rdlength_offset); if (r < 0) goto fail; rds = p->size - saved_size; switch (rr->unparseable ? _DNS_TYPE_INVALID : rr->key->type) { case DNS_TYPE_SRV: r = dns_packet_append_uint16(p, rr->srv.priority, NULL); if (r < 0) goto fail; r = dns_packet_append_uint16(p, rr->srv.weight, NULL); if (r < 0) goto fail; r = dns_packet_append_uint16(p, rr->srv.port, NULL); if (r < 0) goto fail; r = dns_packet_append_name(p, rr->srv.name, true, false, NULL); break; case DNS_TYPE_PTR: case DNS_TYPE_NS: case DNS_TYPE_CNAME: case DNS_TYPE_DNAME: r = dns_packet_append_name(p, rr->ptr.name, true, false, NULL); break; case DNS_TYPE_HINFO: r = dns_packet_append_string(p, rr->hinfo.cpu, NULL); if (r < 0) goto fail; r = dns_packet_append_string(p, rr->hinfo.os, NULL); break; case DNS_TYPE_SPF: /* exactly the same as TXT */ case DNS_TYPE_TXT: if (!rr->txt.items) { /* RFC 6763, section 6.1 suggests to generate * single empty string for an empty array. 
*/ r = dns_packet_append_raw_string(p, NULL, 0, NULL); if (r < 0) goto fail; } else { DnsTxtItem *i; LIST_FOREACH(items, i, rr->txt.items) { r = dns_packet_append_raw_string(p, i->data, i->length, NULL); if (r < 0) goto fail; } } r = 0; break; case DNS_TYPE_A: r = dns_packet_append_blob(p, &rr->a.in_addr, sizeof(struct in_addr), NULL); break; case DNS_TYPE_AAAA: r = dns_packet_append_blob(p, &rr->aaaa.in6_addr, sizeof(struct in6_addr), NULL); break; case DNS_TYPE_SOA: r = dns_packet_append_name(p, rr->soa.mname, true, false, NULL); if (r < 0) goto fail; r = dns_packet_append_name(p, rr->soa.rname, true, false, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->soa.serial, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->soa.refresh, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->soa.retry, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->soa.expire, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->soa.minimum, NULL); break; case DNS_TYPE_MX: r = dns_packet_append_uint16(p, rr->mx.priority, NULL); if (r < 0) goto fail; r = dns_packet_append_name(p, rr->mx.exchange, true, false, NULL); break; case DNS_TYPE_LOC: r = dns_packet_append_uint8(p, rr->loc.version, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->loc.size, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->loc.horiz_pre, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->loc.vert_pre, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->loc.latitude, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->loc.longitude, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->loc.altitude, NULL); break; case DNS_TYPE_DS: r = dns_packet_append_uint16(p, rr->ds.key_tag, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->ds.algorithm, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->ds.digest_type, NULL); if (r < 0) goto fail; r = 
dns_packet_append_blob(p, rr->ds.digest, rr->ds.digest_size, NULL); break; case DNS_TYPE_SSHFP: r = dns_packet_append_uint8(p, rr->sshfp.algorithm, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->sshfp.fptype, NULL); if (r < 0) goto fail; r = dns_packet_append_blob(p, rr->sshfp.fingerprint, rr->sshfp.fingerprint_size, NULL); break; case DNS_TYPE_DNSKEY: r = dns_packet_append_uint16(p, rr->dnskey.flags, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->dnskey.protocol, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->dnskey.algorithm, NULL); if (r < 0) goto fail; r = dns_packet_append_blob(p, rr->dnskey.key, rr->dnskey.key_size, NULL); break; case DNS_TYPE_RRSIG: r = dns_packet_append_uint16(p, rr->rrsig.type_covered, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->rrsig.algorithm, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->rrsig.labels, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->rrsig.original_ttl, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->rrsig.expiration, NULL); if (r < 0) goto fail; r = dns_packet_append_uint32(p, rr->rrsig.inception, NULL); if (r < 0) goto fail; r = dns_packet_append_uint16(p, rr->rrsig.key_tag, NULL); if (r < 0) goto fail; r = dns_packet_append_name(p, rr->rrsig.signer, false, true, NULL); if (r < 0) goto fail; r = dns_packet_append_blob(p, rr->rrsig.signature, rr->rrsig.signature_size, NULL); break; case DNS_TYPE_NSEC: r = dns_packet_append_name(p, rr->nsec.next_domain_name, false, false, NULL); if (r < 0) goto fail; r = dns_packet_append_types(p, rr->nsec.types, NULL); if (r < 0) goto fail; break; case DNS_TYPE_NSEC3: r = dns_packet_append_uint8(p, rr->nsec3.algorithm, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->nsec3.flags, NULL); if (r < 0) goto fail; r = dns_packet_append_uint16(p, rr->nsec3.iterations, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->nsec3.salt_size, NULL); if 
(r < 0) goto fail; r = dns_packet_append_blob(p, rr->nsec3.salt, rr->nsec3.salt_size, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->nsec3.next_hashed_name_size, NULL); if (r < 0) goto fail; r = dns_packet_append_blob(p, rr->nsec3.next_hashed_name, rr->nsec3.next_hashed_name_size, NULL); if (r < 0) goto fail; r = dns_packet_append_types(p, rr->nsec3.types, NULL); if (r < 0) goto fail; break; case DNS_TYPE_TLSA: r = dns_packet_append_uint8(p, rr->tlsa.cert_usage, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->tlsa.selector, NULL); if (r < 0) goto fail; r = dns_packet_append_uint8(p, rr->tlsa.matching_type, NULL); if (r < 0) goto fail; r = dns_packet_append_blob(p, rr->tlsa.data, rr->tlsa.data_size, NULL); break; case DNS_TYPE_CAA: r = dns_packet_append_uint8(p, rr->caa.flags, NULL); if (r < 0) goto fail; r = dns_packet_append_string(p, rr->caa.tag, NULL); if (r < 0) goto fail; r = dns_packet_append_blob(p, rr->caa.value, rr->caa.value_size, NULL); break; case DNS_TYPE_OPT: case DNS_TYPE_OPENPGPKEY: case _DNS_TYPE_INVALID: /* unparseable */ default: r = dns_packet_append_blob(p, rr->generic.data, rr->generic.data_size, NULL); break; } if (r < 0) goto fail; /* Let's calculate the actual data size and update the field */ rdlength = p->size - rdlength_offset - sizeof(uint16_t); if (rdlength > 0xFFFF) { r = -ENOSPC; goto fail; } end = p->size; p->size = rdlength_offset; r = dns_packet_append_uint16(p, rdlength, NULL); if (r < 0) goto fail; p->size = end; if (start) *start = saved_size; if (rdata_start) *rdata_start = rds; return 0; fail: dns_packet_truncate(p, saved_size); return r; } int dns_packet_append_question(DnsPacket *p, DnsQuestion *q) { DnsResourceKey *key; int r; assert(p); DNS_QUESTION_FOREACH(key, q) { r = dns_packet_append_key(p, key, 0, NULL); if (r < 0) return r; } return 0; } int dns_packet_append_answer(DnsPacket *p, DnsAnswer *a) { DnsResourceRecord *rr; DnsAnswerFlags flags; int r; assert(p); 
DNS_ANSWER_FOREACH_FLAGS(rr, flags, a) { r = dns_packet_append_rr(p, rr, flags, NULL, NULL); if (r < 0) return r; } return 0; } int dns_packet_read(DnsPacket *p, size_t sz, const void **ret, size_t *start) { assert(p); if (p->rindex + sz > p->size) return -EMSGSIZE; if (ret) *ret = (uint8_t*) DNS_PACKET_DATA(p) + p->rindex; if (start) *start = p->rindex; p->rindex += sz; return 0; } void dns_packet_rewind(DnsPacket *p, size_t idx) { assert(p); assert(idx <= p->size); assert(idx >= DNS_PACKET_HEADER_SIZE); p->rindex = idx; } int dns_packet_read_blob(DnsPacket *p, void *d, size_t sz, size_t *start) { const void *q; int r; assert(p); assert(d); r = dns_packet_read(p, sz, &q, start); if (r < 0) return r; memcpy(d, q, sz); return 0; } static int dns_packet_read_memdup( DnsPacket *p, size_t size, void **ret, size_t *ret_size, size_t *ret_start) { const void *src; size_t start; int r; assert(p); assert(ret); r = dns_packet_read(p, size, &src, &start); if (r < 0) return r; if (size <= 0) *ret = NULL; else { void *copy; copy = memdup(src, size); if (!copy) return -ENOMEM; *ret = copy; } if (ret_size) *ret_size = size; if (ret_start) *ret_start = start; return 0; } int dns_packet_read_uint8(DnsPacket *p, uint8_t *ret, size_t *start) { const void *d; int r; assert(p); r = dns_packet_read(p, sizeof(uint8_t), &d, start); if (r < 0) return r; *ret = ((uint8_t*) d)[0]; return 0; } int dns_packet_read_uint16(DnsPacket *p, uint16_t *ret, size_t *start) { const void *d; int r; assert(p); r = dns_packet_read(p, sizeof(uint16_t), &d, start); if (r < 0) return r; *ret = unaligned_read_be16(d); return 0; } int dns_packet_read_uint32(DnsPacket *p, uint32_t *ret, size_t *start) { const void *d; int r; assert(p); r = dns_packet_read(p, sizeof(uint32_t), &d, start); if (r < 0) return r; *ret = unaligned_read_be32(d); return 0; } int dns_packet_read_string(DnsPacket *p, char **ret, size_t *start) { _cleanup_(rewind_dns_packet) DnsPacketRewinder rewinder; const void *d; char *t; uint8_t c; 
int r; assert(p); INIT_REWINDER(rewinder, p); r = dns_packet_read_uint8(p, &c, NULL); if (r < 0) return r; r = dns_packet_read(p, c, &d, NULL); if (r < 0) return r; if (memchr(d, 0, c)) return -EBADMSG; t = strndup(d, c); if (!t) return -ENOMEM; if (!utf8_is_valid(t)) { free(t); return -EBADMSG; } *ret = t; if (start) *start = rewinder.saved_rindex; CANCEL_REWINDER(rewinder); return 0; } int dns_packet_read_raw_string(DnsPacket *p, const void **ret, size_t *size, size_t *start) { _cleanup_(rewind_dns_packet) DnsPacketRewinder rewinder; uint8_t c; int r; assert(p); INIT_REWINDER(rewinder, p); r = dns_packet_read_uint8(p, &c, NULL); if (r < 0) return r; r = dns_packet_read(p, c, ret, NULL); if (r < 0) return r; if (size) *size = c; if (start) *start = rewinder.saved_rindex; CANCEL_REWINDER(rewinder); return 0; } int dns_packet_read_name( DnsPacket *p, char **_ret, bool allow_compression, size_t *start) { _cleanup_(rewind_dns_packet) DnsPacketRewinder rewinder; size_t after_rindex = 0, jump_barrier; _cleanup_free_ char *ret = NULL; size_t n = 0, allocated = 0; bool first = true; int r; assert(p); assert(_ret); INIT_REWINDER(rewinder, p); jump_barrier = p->rindex; if (p->refuse_compression) allow_compression = false; for (;;) { uint8_t c, d; r = dns_packet_read_uint8(p, &c, NULL); if (r < 0) return r; if (c == 0) /* End of name */ break; else if (c <= 63) { const char *label; /* Literal label */ r = dns_packet_read(p, c, (const void**) &label, NULL); if (r < 0) return r; if (!GREEDY_REALLOC(ret, allocated, n + !first + DNS_LABEL_ESCAPED_MAX)) return -ENOMEM; if (first) first = false; else ret[n++] = '.'; r = dns_label_escape(label, c, ret + n, DNS_LABEL_ESCAPED_MAX); if (r < 0) return r; n += r; continue; } else if (allow_compression && (c & 0xc0) == 0xc0) { uint16_t ptr; /* Pointer */ r = dns_packet_read_uint8(p, &d, NULL); if (r < 0) return r; ptr = (uint16_t) (c & ~0xc0) << 8 | (uint16_t) d; if (ptr < DNS_PACKET_HEADER_SIZE || ptr >= jump_barrier) return -EBADMSG; 
if (after_rindex == 0) after_rindex = p->rindex; /* Jumps are limited to a "prior occurrence" (RFC-1035 4.1.4) */ jump_barrier = ptr; p->rindex = ptr; } else return -EBADMSG; } if (!GREEDY_REALLOC(ret, allocated, n + 1)) return -ENOMEM; ret[n] = 0; if (after_rindex != 0) p->rindex= after_rindex; *_ret = ret; ret = NULL; if (start) *start = rewinder.saved_rindex; CANCEL_REWINDER(rewinder); return 0; } static int dns_packet_read_type_window(DnsPacket *p, Bitmap **types, size_t *start) { uint8_t window; uint8_t length; const uint8_t *bitmap; uint8_t bit = 0; unsigned i; bool found = false; _cleanup_(rewind_dns_packet) DnsPacketRewinder rewinder; int r; assert(p); assert(types); INIT_REWINDER(rewinder, p); r = bitmap_ensure_allocated(types); if (r < 0) return r; r = dns_packet_read_uint8(p, &window, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &length, NULL); if (r < 0) return r; if (length == 0 || length > 32) return -EBADMSG; r = dns_packet_read(p, length, (const void **)&bitmap, NULL); if (r < 0) return r; for (i = 0; i < length; i++) { uint8_t bitmask = 1 << 7; if (!bitmap[i]) { found = false; bit += 8; continue; } found = true; while (bitmask) { if (bitmap[i] & bitmask) { uint16_t n; n = (uint16_t) window << 8 | (uint16_t) bit; /* Ignore pseudo-types. 
see RFC4034 section 4.1.2 */ if (dns_type_is_pseudo(n)) continue; r = bitmap_set(*types, n); if (r < 0) return r; } bit++; bitmask >>= 1; } } if (!found) return -EBADMSG; if (start) *start = rewinder.saved_rindex; CANCEL_REWINDER(rewinder); return 0; } static int dns_packet_read_type_windows(DnsPacket *p, Bitmap **types, size_t size, size_t *start) { _cleanup_(rewind_dns_packet) DnsPacketRewinder rewinder; int r; INIT_REWINDER(rewinder, p); while (p->rindex < rewinder.saved_rindex + size) { r = dns_packet_read_type_window(p, types, NULL); if (r < 0) return r; /* don't read past end of current RR */ if (p->rindex > rewinder.saved_rindex + size) return -EBADMSG; } if (p->rindex != rewinder.saved_rindex + size) return -EBADMSG; if (start) *start = rewinder.saved_rindex; CANCEL_REWINDER(rewinder); return 0; } int dns_packet_read_key(DnsPacket *p, DnsResourceKey **ret, bool *ret_cache_flush, size_t *start) { _cleanup_(rewind_dns_packet) DnsPacketRewinder rewinder; _cleanup_free_ char *name = NULL; bool cache_flush = false; uint16_t class, type; DnsResourceKey *key; int r; assert(p); assert(ret); INIT_REWINDER(rewinder, p); r = dns_packet_read_name(p, &name, true, NULL); if (r < 0) return r; r = dns_packet_read_uint16(p, &type, NULL); if (r < 0) return r; r = dns_packet_read_uint16(p, &class, NULL); if (r < 0) return r; if (p->protocol == DNS_PROTOCOL_MDNS) { /* See RFC6762, Section 10.2 */ if (type != DNS_TYPE_OPT && (class & MDNS_RR_CACHE_FLUSH)) { class &= ~MDNS_RR_CACHE_FLUSH; cache_flush = true; } } key = dns_resource_key_new_consume(class, type, name); if (!key) return -ENOMEM; name = NULL; *ret = key; if (ret_cache_flush) *ret_cache_flush = cache_flush; if (start) *start = rewinder.saved_rindex; CANCEL_REWINDER(rewinder); return 0; } static bool loc_size_ok(uint8_t size) { uint8_t m = size >> 4, e = size & 0xF; return m <= 9 && e <= 9 && (m > 0 || e == 0); } int dns_packet_read_rr(DnsPacket *p, DnsResourceRecord **ret, bool *ret_cache_flush, size_t *start) { 
_cleanup_(dns_resource_record_unrefp) DnsResourceRecord *rr = NULL; _cleanup_(dns_resource_key_unrefp) DnsResourceKey *key = NULL; _cleanup_(rewind_dns_packet) DnsPacketRewinder rewinder; size_t offset; uint16_t rdlength; bool cache_flush; int r; assert(p); assert(ret); INIT_REWINDER(rewinder, p); r = dns_packet_read_key(p, &key, &cache_flush, NULL); if (r < 0) return r; if (!dns_class_is_valid_rr(key->class) || !dns_type_is_valid_rr(key->type)) return -EBADMSG; rr = dns_resource_record_new(key); if (!rr) return -ENOMEM; r = dns_packet_read_uint32(p, &rr->ttl, NULL); if (r < 0) return r; /* RFC 2181, Section 8, suggests to * treat a TTL with the MSB set as a zero TTL. */ if (rr->ttl & UINT32_C(0x80000000)) rr->ttl = 0; r = dns_packet_read_uint16(p, &rdlength, NULL); if (r < 0) return r; if (p->rindex + rdlength > p->size) return -EBADMSG; offset = p->rindex; switch (rr->key->type) { case DNS_TYPE_SRV: r = dns_packet_read_uint16(p, &rr->srv.priority, NULL); if (r < 0) return r; r = dns_packet_read_uint16(p, &rr->srv.weight, NULL); if (r < 0) return r; r = dns_packet_read_uint16(p, &rr->srv.port, NULL); if (r < 0) return r; r = dns_packet_read_name(p, &rr->srv.name, true, NULL); break; case DNS_TYPE_PTR: case DNS_TYPE_NS: case DNS_TYPE_CNAME: case DNS_TYPE_DNAME: r = dns_packet_read_name(p, &rr->ptr.name, true, NULL); break; case DNS_TYPE_HINFO: r = dns_packet_read_string(p, &rr->hinfo.cpu, NULL); if (r < 0) return r; r = dns_packet_read_string(p, &rr->hinfo.os, NULL); break; case DNS_TYPE_SPF: /* exactly the same as TXT */ case DNS_TYPE_TXT: if (rdlength <= 0) { DnsTxtItem *i; /* RFC 6763, section 6.1 suggests to treat * empty TXT RRs as equivalent to a TXT record * with a single empty string. 
*/ i = malloc0(offsetof(DnsTxtItem, data) + 1); /* for safety reasons we add an extra NUL byte */ if (!i) return -ENOMEM; rr->txt.items = i; } else { DnsTxtItem *last = NULL; while (p->rindex < offset + rdlength) { DnsTxtItem *i; const void *data; size_t sz; r = dns_packet_read_raw_string(p, &data, &sz, NULL); if (r < 0) return r; i = malloc0(offsetof(DnsTxtItem, data) + sz + 1); /* extra NUL byte at the end */ if (!i) return -ENOMEM; memcpy(i->data, data, sz); i->length = sz; LIST_INSERT_AFTER(items, rr->txt.items, last, i); last = i; } } r = 0; break; case DNS_TYPE_A: r = dns_packet_read_blob(p, &rr->a.in_addr, sizeof(struct in_addr), NULL); break; case DNS_TYPE_AAAA: r = dns_packet_read_blob(p, &rr->aaaa.in6_addr, sizeof(struct in6_addr), NULL); break; case DNS_TYPE_SOA: r = dns_packet_read_name(p, &rr->soa.mname, true, NULL); if (r < 0) return r; r = dns_packet_read_name(p, &rr->soa.rname, true, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->soa.serial, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->soa.refresh, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->soa.retry, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->soa.expire, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->soa.minimum, NULL); break; case DNS_TYPE_MX: r = dns_packet_read_uint16(p, &rr->mx.priority, NULL); if (r < 0) return r; r = dns_packet_read_name(p, &rr->mx.exchange, true, NULL); break; case DNS_TYPE_LOC: { uint8_t t; size_t pos; r = dns_packet_read_uint8(p, &t, &pos); if (r < 0) return r; if (t == 0) { rr->loc.version = t; r = dns_packet_read_uint8(p, &rr->loc.size, NULL); if (r < 0) return r; if (!loc_size_ok(rr->loc.size)) return -EBADMSG; r = dns_packet_read_uint8(p, &rr->loc.horiz_pre, NULL); if (r < 0) return r; if (!loc_size_ok(rr->loc.horiz_pre)) return -EBADMSG; r = dns_packet_read_uint8(p, &rr->loc.vert_pre, NULL); if (r < 0) return r; if (!loc_size_ok(rr->loc.vert_pre)) return -EBADMSG; r = 
dns_packet_read_uint32(p, &rr->loc.latitude, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->loc.longitude, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->loc.altitude, NULL); if (r < 0) return r; break; } else { dns_packet_rewind(p, pos); rr->unparseable = true; goto unparseable; } } case DNS_TYPE_DS: r = dns_packet_read_uint16(p, &rr->ds.key_tag, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &rr->ds.algorithm, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &rr->ds.digest_type, NULL); if (r < 0) return r; r = dns_packet_read_memdup(p, rdlength - 4, &rr->ds.digest, &rr->ds.digest_size, NULL); if (r < 0) return r; if (rr->ds.digest_size <= 0) /* the accepted size depends on the algorithm, but for now just ensure that the value is greater than zero */ return -EBADMSG; break; case DNS_TYPE_SSHFP: r = dns_packet_read_uint8(p, &rr->sshfp.algorithm, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &rr->sshfp.fptype, NULL); if (r < 0) return r; r = dns_packet_read_memdup(p, rdlength - 2, &rr->sshfp.fingerprint, &rr->sshfp.fingerprint_size, NULL); if (rr->sshfp.fingerprint_size <= 0) /* the accepted size depends on the algorithm, but for now just ensure that the value is greater than zero */ return -EBADMSG; break; case DNS_TYPE_DNSKEY: r = dns_packet_read_uint16(p, &rr->dnskey.flags, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &rr->dnskey.protocol, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &rr->dnskey.algorithm, NULL); if (r < 0) return r; r = dns_packet_read_memdup(p, rdlength - 4, &rr->dnskey.key, &rr->dnskey.key_size, NULL); if (rr->dnskey.key_size <= 0) /* the accepted size depends on the algorithm, but for now just ensure that the value is greater than zero */ return -EBADMSG; break; case DNS_TYPE_RRSIG: r = dns_packet_read_uint16(p, &rr->rrsig.type_covered, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &rr->rrsig.algorithm, NULL); if (r < 0) return r; r = 
dns_packet_read_uint8(p, &rr->rrsig.labels, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->rrsig.original_ttl, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->rrsig.expiration, NULL); if (r < 0) return r; r = dns_packet_read_uint32(p, &rr->rrsig.inception, NULL); if (r < 0) return r; r = dns_packet_read_uint16(p, &rr->rrsig.key_tag, NULL); if (r < 0) return r; r = dns_packet_read_name(p, &rr->rrsig.signer, false, NULL); if (r < 0) return r; r = dns_packet_read_memdup(p, offset + rdlength - p->rindex, &rr->rrsig.signature, &rr->rrsig.signature_size, NULL); if (rr->rrsig.signature_size <= 0) /* the accepted size depends on the algorithm, but for now just ensure that the value is greater than zero */ return -EBADMSG; break; case DNS_TYPE_NSEC: { /* * RFC6762, section 18.14 explictly states mDNS should use name compression. * This contradicts RFC3845, section 2.1.1 */ bool allow_compressed = p->protocol == DNS_PROTOCOL_MDNS; r = dns_packet_read_name(p, &rr->nsec.next_domain_name, allow_compressed, NULL); if (r < 0) return r; r = dns_packet_read_type_windows(p, &rr->nsec.types, offset + rdlength - p->rindex, NULL); /* We accept empty NSEC bitmaps. The bit indicating the presence of the NSEC record itself * is redundant and in e.g., RFC4956 this fact is used to define a use for NSEC records * without the NSEC bit set. 
*/ break; } case DNS_TYPE_NSEC3: { uint8_t size; r = dns_packet_read_uint8(p, &rr->nsec3.algorithm, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &rr->nsec3.flags, NULL); if (r < 0) return r; r = dns_packet_read_uint16(p, &rr->nsec3.iterations, NULL); if (r < 0) return r; /* this may be zero */ r = dns_packet_read_uint8(p, &size, NULL); if (r < 0) return r; r = dns_packet_read_memdup(p, size, &rr->nsec3.salt, &rr->nsec3.salt_size, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &size, NULL); if (r < 0) return r; if (size <= 0) return -EBADMSG; r = dns_packet_read_memdup(p, size, &rr->nsec3.next_hashed_name, &rr->nsec3.next_hashed_name_size, NULL); if (r < 0) return r; r = dns_packet_read_type_windows(p, &rr->nsec3.types, offset + rdlength - p->rindex, NULL); /* empty non-terminals can have NSEC3 records, so empty bitmaps are allowed */ break; } case DNS_TYPE_TLSA: r = dns_packet_read_uint8(p, &rr->tlsa.cert_usage, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &rr->tlsa.selector, NULL); if (r < 0) return r; r = dns_packet_read_uint8(p, &rr->tlsa.matching_type, NULL); if (r < 0) return r; r = dns_packet_read_memdup(p, rdlength - 3, &rr->tlsa.data, &rr->tlsa.data_size, NULL); if (rr->tlsa.data_size <= 0) /* the accepted size depends on the algorithm, but for now just ensure that the value is greater than zero */ return -EBADMSG; break; case DNS_TYPE_CAA: r = dns_packet_read_uint8(p, &rr->caa.flags, NULL); if (r < 0) return r; r = dns_packet_read_string(p, &rr->caa.tag, NULL); if (r < 0) return r; r = dns_packet_read_memdup(p, rdlength + offset - p->rindex, &rr->caa.value, &rr->caa.value_size, NULL); break; case DNS_TYPE_OPT: /* we only care about the header of OPT for now. 
*/ case DNS_TYPE_OPENPGPKEY: default: unparseable: r = dns_packet_read_memdup(p, rdlength, &rr->generic.data, &rr->generic.data_size, NULL); break; } if (r < 0) return r; if (p->rindex != offset + rdlength) return -EBADMSG; *ret = rr; rr = NULL; if (ret_cache_flush) *ret_cache_flush = cache_flush; if (start) *start = rewinder.saved_rindex; CANCEL_REWINDER(rewinder); return 0; } static bool opt_is_good(DnsResourceRecord *rr, bool *rfc6975) { const uint8_t* p; bool found_dau_dhu_n3u = false; size_t l; /* Checks whether the specified OPT RR is well-formed and whether it contains RFC6975 data (which is not OK in * a reply). */ assert(rr); assert(rr->key->type == DNS_TYPE_OPT); /* Check that the version is 0 */ if (((rr->ttl >> 16) & UINT32_C(0xFF)) != 0) { *rfc6975 = false; return true; /* if it's not version 0, it's OK, but we will ignore the OPT field contents */ } p = rr->opt.data; l = rr->opt.data_size; while (l > 0) { uint16_t option_code, option_length; /* At least four bytes for OPTION-CODE and OPTION-LENGTH are required */ if (l < 4U) return false; option_code = unaligned_read_be16(p); option_length = unaligned_read_be16(p + 2); if (l < option_length + 4U) return false; /* RFC 6975 DAU, DHU or N3U fields found. 
                 */
                if (IN_SET(option_code, 5, 6, 7))
                        found_dau_dhu_n3u = true;

                p += option_length + 4U;
                l -= option_length + 4U;
        }

        *rfc6975 = found_dau_dhu_n3u;
        return true;
}

/* Parse the whole packet into p->question and p->answer. Idempotent: a second
 * call is a no-op. Questions must not carry the mDNS cache-flush bit and must
 * use a valid query type. RRs are read in wire order (answer, authority,
 * additional); exactly one well-formed OPT RR owned by the root domain is
 * accepted, and it must sit in the additional section. */
int dns_packet_extract(DnsPacket *p) {
        _cleanup_(dns_question_unrefp) DnsQuestion *question = NULL;
        _cleanup_(dns_answer_unrefp) DnsAnswer *answer = NULL;
        _cleanup_(rewind_dns_packet) DnsPacketRewinder rewinder = {};
        unsigned n, i;
        int r;

        if (p->extracted)
                return 0;

        INIT_REWINDER(rewinder, p);
        dns_packet_rewind(p, DNS_PACKET_HEADER_SIZE);

        n = DNS_PACKET_QDCOUNT(p);
        if (n > 0) {
                question = dns_question_new(n);
                if (!question)
                        return -ENOMEM;

                for (i = 0; i < n; i++) {
                        _cleanup_(dns_resource_key_unrefp) DnsResourceKey *key = NULL;
                        bool cache_flush;

                        r = dns_packet_read_key(p, &key, &cache_flush, NULL);
                        if (r < 0)
                                return r;

                        /* The cache-flush bit is only defined for RRs, not questions */
                        if (cache_flush)
                                return -EBADMSG;

                        if (!dns_type_is_valid_query(key->type))
                                return -EBADMSG;

                        r = dns_question_add(question, key);
                        if (r < 0)
                                return r;
                }
        }

        n = DNS_PACKET_RRCOUNT(p);
        if (n > 0) {
                _cleanup_(dns_resource_record_unrefp) DnsResourceRecord *previous = NULL;
                bool bad_opt = false;

                answer = dns_answer_new(n);
                if (!answer)
                        return -ENOMEM;

                for (i = 0; i < n; i++) {
                        _cleanup_(dns_resource_record_unrefp) DnsResourceRecord *rr = NULL;
                        bool cache_flush = false;

                        r = dns_packet_read_rr(p, &rr, &cache_flush, NULL);
                        if (r < 0)
                                return r;

                        /* Try to reduce memory usage a bit */
                        if (previous)
                                dns_resource_key_reduce(&rr->key, &previous->key);

                        if (rr->key->type == DNS_TYPE_OPT) {
                                bool has_rfc6975;

                                if (p->opt || bad_opt) {
                                        /* Multiple OPT RRs? if so, let's ignore all, because there's something wrong
                                         * with the server, and if one is valid we wouldn't know which one. */
                                        log_debug("Multiple OPT RRs detected, ignoring all.");
                                        bad_opt = true;
                                        continue;
                                }

                                if (!dns_name_is_root(dns_resource_key_name(rr->key))) {
                                        /* If the OPT RR is not owned by the root domain, then it is bad, let's ignore
                                         * it. */
                                        log_debug("OPT RR is not owned by root domain, ignoring.");
                                        bad_opt = true;
                                        continue;
                                }

                                if (i < DNS_PACKET_ANCOUNT(p) + DNS_PACKET_NSCOUNT(p)) {
                                        /* OPT RR is in the wrong section? Some Belkin routers do this. This is a hint
                                         * the EDNS implementation is borked, like the Belkin one is, hence ignore
                                         * it. */
                                        log_debug("OPT RR in wrong section, ignoring.");
                                        bad_opt = true;
                                        continue;
                                }

                                if (!opt_is_good(rr, &has_rfc6975)) {
                                        log_debug("Malformed OPT RR, ignoring.");
                                        bad_opt = true;
                                        continue;
                                }

                                if (DNS_PACKET_QR(p)) {
                                        /* Additional checks for responses */

                                        if (!DNS_RESOURCE_RECORD_OPT_VERSION_SUPPORTED(rr)) {
                                                /* If this is a reply and we don't know the EDNS version then something
                                                 * is weird... */
                                                log_debug("EDNS version newer that our request, bad server.");
                                                return -EBADMSG;
                                        }

                                        if (has_rfc6975) {
                                                /* If the OPT RR contains RFC6975 algorithm data, then this is indication that
                                                 * the server just copied the OPT it got from us (which contained that data)
                                                 * back into the reply. If so, then it doesn't properly support EDNS, as
                                                 * RFC6975 makes it very clear that the algorithm data should only be contained
                                                 * in questions, never in replies. Crappy Belkin routers copy the OPT data for
                                                 * example, hence let's detect this so that we downgrade early. */
                                                log_debug("OPT RR contained RFC6975 data, ignoring.");
                                                bad_opt = true;
                                                continue;
                                        }
                                }

                                p->opt = dns_resource_record_ref(rr);
                        } else {

                                /* According to RFC 4795, section 2.9. only the RRs from the Answer section shall be
                                 * cached. Hence mark only those RRs as cacheable by default, but not the ones from the
                                 * Additional or Authority sections. */

                                r = dns_answer_add(answer, rr, p->ifindex,
                                                   (i < DNS_PACKET_ANCOUNT(p) ? DNS_ANSWER_CACHEABLE : 0) |
                                                   (p->protocol == DNS_PROTOCOL_MDNS && !cache_flush ? DNS_ANSWER_SHARED_OWNER : 0));
                                if (r < 0)
                                        return r;
                        }

                        /* Remember this RR, so that we potentially can merge its ->key object with the next RR. Note
                         * that we only do this if we actually decided to keep the RR around. */
                        dns_resource_record_unref(previous);
                        previous = dns_resource_record_ref(rr);
                }

                if (bad_opt)
                        p->opt = dns_resource_record_unref(p->opt);
        }

        p->question = question;
        question = NULL;

        p->answer = answer;
        answer = NULL;

        p->extracted = true;

        /* no CANCEL, always rewind */
        return 0;
}

/* Returns > 0 if packet p is a reply whose question section consists of exactly
 * the single key "key", 0 if not, negative errno on parse failure. */
int dns_packet_is_reply_for(DnsPacket *p, const DnsResourceKey *key) {
        int r;

        assert(p);
        assert(key);

        /* Checks if the specified packet is a reply for the specified
         * key and the specified key is the only one in the question
         * section. */

        if (DNS_PACKET_QR(p) != 1)
                return 0;

        /* Let's unpack the packet, if that hasn't happened yet. */
        r = dns_packet_extract(p);
        if (r < 0)
                return r;

        if (!p->question)
                return 0;

        if (p->question->n_keys != 1)
                return 0;

        return dns_resource_key_equal(p->question->keys[0], key);
}

/* RCODE number -> display-name table (RFC 1035 + extended RCODEs). */
static const char* const dns_rcode_table[_DNS_RCODE_MAX_DEFINED] = {
        [DNS_RCODE_SUCCESS] = "SUCCESS",
        [DNS_RCODE_FORMERR] = "FORMERR",
        [DNS_RCODE_SERVFAIL] = "SERVFAIL",
        [DNS_RCODE_NXDOMAIN] = "NXDOMAIN",
        [DNS_RCODE_NOTIMP] = "NOTIMP",
        [DNS_RCODE_REFUSED] = "REFUSED",
        [DNS_RCODE_YXDOMAIN] = "YXDOMAIN",
        /* NOTE(review): "YRRSET" looks like a typo for "YXRRSET" — confirm before
         * changing, as the string is emitted to users/logs. */
        [DNS_RCODE_YXRRSET] = "YRRSET",
        [DNS_RCODE_NXRRSET] = "NXRRSET",
        [DNS_RCODE_NOTAUTH] = "NOTAUTH",
        [DNS_RCODE_NOTZONE] = "NOTZONE",
        [DNS_RCODE_BADVERS] = "BADVERS",
        [DNS_RCODE_BADKEY] = "BADKEY",
        [DNS_RCODE_BADTIME] = "BADTIME",
        [DNS_RCODE_BADMODE] = "BADMODE",
        [DNS_RCODE_BADNAME] = "BADNAME",
        [DNS_RCODE_BADALG] = "BADALG",
        [DNS_RCODE_BADTRUNC] = "BADTRUNC",
        [DNS_RCODE_BADCOOKIE] = "BADCOOKIE",
};
DEFINE_STRING_TABLE_LOOKUP(dns_rcode, int);

/* Transport protocol enum -> name table. */
static const char* const dns_protocol_table[_DNS_PROTOCOL_MAX] = {
        [DNS_PROTOCOL_DNS] = "dns",
        [DNS_PROTOCOL_MDNS] = "mdns",
        [DNS_PROTOCOL_LLMNR] = "llmnr",
};
DEFINE_STRING_TABLE_LOOKUP(dns_protocol, DnsProtocol);
./CrossVul/dataset_final_sorted/CWE-20/c/good_3371_0
crossvul-cpp_data_good_5799_0
/* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * Module Name: * commctrl.c * * Abstract: Contains all routines for control of the AFA comm layer * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/blkdev.h> #include <linux/delay.h> /* ssleep prototype */ #include <linux/kthread.h> #include <linux/semaphore.h> #include <asm/uaccess.h> #include <scsi/scsi_host.h> #include "aacraid.h" /** * ioctl_send_fib - send a FIB from userspace * @dev: adapter is being processed * @arg: arguments to the ioctl call * * This routine sends a fib to the adapter on behalf of a user level * program. 
*/ # define AAC_DEBUG_PREAMBLE KERN_INFO # define AAC_DEBUG_POSTAMBLE static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) { struct hw_fib * kfib; struct fib *fibptr; struct hw_fib * hw_fib = (struct hw_fib *)0; dma_addr_t hw_fib_pa = (dma_addr_t)0LL; unsigned size; int retval; if (dev->in_reset) { return -EBUSY; } fibptr = aac_fib_alloc(dev); if(fibptr == NULL) { return -ENOMEM; } kfib = fibptr->hw_fib_va; /* * First copy in the header so that we can check the size field. */ if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) { aac_fib_free(fibptr); return -EFAULT; } /* * Since we copy based on the fib header size, make sure that we * will not overrun the buffer when we copy the memory. Return * an error if we would. */ size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); if (size < le16_to_cpu(kfib->header.SenderSize)) size = le16_to_cpu(kfib->header.SenderSize); if (size > dev->max_fib_size) { dma_addr_t daddr; if (size > 2048) { retval = -EINVAL; goto cleanup; } kfib = pci_alloc_consistent(dev->pdev, size, &daddr); if (!kfib) { retval = -ENOMEM; goto cleanup; } /* Highjack the hw_fib */ hw_fib = fibptr->hw_fib_va; hw_fib_pa = fibptr->hw_fib_pa; fibptr->hw_fib_va = kfib; fibptr->hw_fib_pa = daddr; memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size); memcpy(kfib, hw_fib, dev->max_fib_size); } if (copy_from_user(kfib, arg, size)) { retval = -EFAULT; goto cleanup; } if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { aac_adapter_interrupt(dev); /* * Since we didn't really send a fib, zero out the state to allow * cleanup code not to assert. 
*/ kfib->header.XferState = 0; } else { retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr, le16_to_cpu(kfib->header.Size) , FsaNormal, 1, 1, NULL, NULL); if (retval) { goto cleanup; } if (aac_fib_complete(fibptr) != 0) { retval = -EINVAL; goto cleanup; } } /* * Make sure that the size returned by the adapter (which includes * the header) is less than or equal to the size of a fib, so we * don't corrupt application data. Then copy that size to the user * buffer. (Don't try to add the header information again, since it * was already included by the adapter.) */ retval = 0; if (copy_to_user(arg, (void *)kfib, size)) retval = -EFAULT; cleanup: if (hw_fib) { pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa); fibptr->hw_fib_pa = hw_fib_pa; fibptr->hw_fib_va = hw_fib; } if (retval != -ERESTARTSYS) aac_fib_free(fibptr); return retval; } /** * open_getadapter_fib - Get the next fib * * This routine will get the next Fib, if available, from the AdapterFibContext * passed in from the user. */ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg) { struct aac_fib_context * fibctx; int status; fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL); if (fibctx == NULL) { status = -ENOMEM; } else { unsigned long flags; struct list_head * entry; struct aac_fib_context * context; fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT; fibctx->size = sizeof(struct aac_fib_context); /* * Yes yes, I know this could be an index, but we have a * better guarantee of uniqueness for the locked loop below. * Without the aid of a persistent history, this also helps * reduce the chance that the opaque context would be reused. */ fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF); /* * Initialize the mutex used to wait for the next AIF. */ sema_init(&fibctx->wait_sem, 0); fibctx->wait = 0; /* * Initialize the fibs and set the count of fibs on * the list to 0. 
*/ fibctx->count = 0; INIT_LIST_HEAD(&fibctx->fib_list); fibctx->jiffies = jiffies/HZ; /* * Now add this context onto the adapter's * AdapterFibContext list. */ spin_lock_irqsave(&dev->fib_lock, flags); /* Ensure that we have a unique identifier */ entry = dev->fib_list.next; while (entry != &dev->fib_list) { context = list_entry(entry, struct aac_fib_context, next); if (context->unique == fibctx->unique) { /* Not unique (32 bits) */ fibctx->unique++; entry = dev->fib_list.next; } else { entry = entry->next; } } list_add_tail(&fibctx->next, &dev->fib_list); spin_unlock_irqrestore(&dev->fib_lock, flags); if (copy_to_user(arg, &fibctx->unique, sizeof(fibctx->unique))) { status = -EFAULT; } else { status = 0; } } return status; } /** * next_getadapter_fib - get the next fib * @dev: adapter to use * @arg: ioctl argument * * This routine will get the next Fib, if available, from the AdapterFibContext * passed in from the user. */ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg) { struct fib_ioctl f; struct fib *fib; struct aac_fib_context *fibctx; int status; struct list_head * entry; unsigned long flags; if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl))) return -EFAULT; /* * Verify that the HANDLE passed in was a valid AdapterFibContext * * Search the list of AdapterFibContext addresses on the adapter * to be sure this is a valid address */ spin_lock_irqsave(&dev->fib_lock, flags); entry = dev->fib_list.next; fibctx = NULL; while (entry != &dev->fib_list) { fibctx = list_entry(entry, struct aac_fib_context, next); /* * Extract the AdapterFibContext from the Input parameters. 
*/ if (fibctx->unique == f.fibctx) { /* We found a winner */ break; } entry = entry->next; fibctx = NULL; } if (!fibctx) { spin_unlock_irqrestore(&dev->fib_lock, flags); dprintk ((KERN_INFO "Fib Context not found\n")); return -EINVAL; } if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || (fibctx->size != sizeof(struct aac_fib_context))) { spin_unlock_irqrestore(&dev->fib_lock, flags); dprintk ((KERN_INFO "Fib Context corrupt?\n")); return -EINVAL; } status = 0; /* * If there are no fibs to send back, then either wait or return * -EAGAIN */ return_fib: if (!list_empty(&fibctx->fib_list)) { /* * Pull the next fib from the fibs */ entry = fibctx->fib_list.next; list_del(entry); fib = list_entry(entry, struct fib, fiblink); fibctx->count--; spin_unlock_irqrestore(&dev->fib_lock, flags); if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) { kfree(fib->hw_fib_va); kfree(fib); return -EFAULT; } /* * Free the space occupied by this copy of the fib. */ kfree(fib->hw_fib_va); kfree(fib); status = 0; } else { spin_unlock_irqrestore(&dev->fib_lock, flags); /* If someone killed the AIF aacraid thread, restart it */ status = !dev->aif_thread; if (status && !dev->in_reset && dev->queues && dev->fsa_dev) { /* Be paranoid, be very paranoid! */ kthread_stop(dev->thread); ssleep(1); dev->aif_thread = 0; dev->thread = kthread_run(aac_command_thread, dev, "%s", dev->name); ssleep(1); } if (f.wait) { if(down_interruptible(&fibctx->wait_sem) < 0) { status = -ERESTARTSYS; } else { /* Lock again and retry */ spin_lock_irqsave(&dev->fib_lock, flags); goto return_fib; } } else { status = -EAGAIN; } } fibctx->jiffies = jiffies/HZ; return status; } int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx) { struct fib *fib; /* * First free any FIBs that have not been consumed. 
*/ while (!list_empty(&fibctx->fib_list)) { struct list_head * entry; /* * Pull the next fib from the fibs */ entry = fibctx->fib_list.next; list_del(entry); fib = list_entry(entry, struct fib, fiblink); fibctx->count--; /* * Free the space occupied by this copy of the fib. */ kfree(fib->hw_fib_va); kfree(fib); } /* * Remove the Context from the AdapterFibContext List */ list_del(&fibctx->next); /* * Invalidate context */ fibctx->type = 0; /* * Free the space occupied by the Context */ kfree(fibctx); return 0; } /** * close_getadapter_fib - close down user fib context * @dev: adapter * @arg: ioctl arguments * * This routine will close down the fibctx passed in from the user. */ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg) { struct aac_fib_context *fibctx; int status; unsigned long flags; struct list_head * entry; /* * Verify that the HANDLE passed in was a valid AdapterFibContext * * Search the list of AdapterFibContext addresses on the adapter * to be sure this is a valid address */ entry = dev->fib_list.next; fibctx = NULL; while(entry != &dev->fib_list) { fibctx = list_entry(entry, struct aac_fib_context, next); /* * Extract the fibctx from the input parameters */ if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */ break; entry = entry->next; fibctx = NULL; } if (!fibctx) return 0; /* Already gone */ if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) || (fibctx->size != sizeof(struct aac_fib_context))) return -EINVAL; spin_lock_irqsave(&dev->fib_lock, flags); status = aac_close_fib_context(dev, fibctx); spin_unlock_irqrestore(&dev->fib_lock, flags); return status; } /** * check_revision - close down user fib context * @dev: adapter * @arg: ioctl arguments * * This routine returns the driver version. * Under Linux, there have been no version incompatibilities, so this is * simple! 
*/ static int check_revision(struct aac_dev *dev, void __user *arg) { struct revision response; char *driver_version = aac_driver_version; u32 version; response.compat = 1; version = (simple_strtol(driver_version, &driver_version, 10) << 24) | 0x00000400; version += simple_strtol(driver_version + 1, &driver_version, 10) << 16; version += simple_strtol(driver_version + 1, NULL, 10); response.version = cpu_to_le32(version); # ifdef AAC_DRIVER_BUILD response.build = cpu_to_le32(AAC_DRIVER_BUILD); # else response.build = cpu_to_le32(9999); # endif if (copy_to_user(arg, &response, sizeof(response))) return -EFAULT; return 0; } /** * * aac_send_raw_scb * */ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) { struct fib* srbfib; int status; struct aac_srb *srbcmd = NULL; struct user_aac_srb *user_srbcmd = NULL; struct user_aac_srb __user *user_srb = arg; struct aac_srb_reply __user *user_reply; struct aac_srb_reply* reply; u32 fibsize = 0; u32 flags = 0; s32 rcode = 0; u32 data_dir; void __user *sg_user[32]; void *sg_list[32]; u32 sg_indx = 0; u32 byte_count = 0; u32 actual_fibsize64, actual_fibsize = 0; int i; if (dev->in_reset) { dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n")); return -EBUSY; } if (!capable(CAP_SYS_ADMIN)){ dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n")); return -EPERM; } /* * Allocate and initialize a Fib then setup a SRB command */ if (!(srbfib = aac_fib_alloc(dev))) { return -ENOMEM; } aac_fib_init(srbfib); /* raw_srb FIB is not FastResponseCapable */ srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable); srbcmd = (struct aac_srb*) fib_data(srbfib); memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */ if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){ dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n")); rcode = -EFAULT; goto cleanup; } if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || (fibsize > (dev->max_fib_size - 
sizeof(struct aac_fibhdr)))) { rcode = -EINVAL; goto cleanup; } user_srbcmd = kmalloc(fibsize, GFP_KERNEL); if (!user_srbcmd) { dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n")); rcode = -ENOMEM; goto cleanup; } if(copy_from_user(user_srbcmd, user_srb,fibsize)){ dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n")); rcode = -EFAULT; goto cleanup; } user_reply = arg+fibsize; flags = user_srbcmd->flags; /* from user in cpu order */ // Fix up srb for endian and force some values srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this srbcmd->channel = cpu_to_le32(user_srbcmd->channel); srbcmd->id = cpu_to_le32(user_srbcmd->id); srbcmd->lun = cpu_to_le32(user_srbcmd->lun); srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout); srbcmd->flags = cpu_to_le32(flags); srbcmd->retry_limit = 0; // Obsolete parameter srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size); memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb)); switch (flags & (SRB_DataIn | SRB_DataOut)) { case SRB_DataOut: data_dir = DMA_TO_DEVICE; break; case (SRB_DataIn | SRB_DataOut): data_dir = DMA_BIDIRECTIONAL; break; case SRB_DataIn: data_dir = DMA_FROM_DEVICE; break; default: data_dir = DMA_NONE; } if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) { dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n", le32_to_cpu(srbcmd->sg.count))); rcode = -EINVAL; goto cleanup; } actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) + ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry)); actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) * (sizeof(struct sgentry64) - sizeof(struct sgentry)); /* User made a mistake - should not continue */ if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) { dprintk((KERN_DEBUG"aacraid: Bad Size specified in " "Raw SRB command calculated fibsize=%lu;%lu " "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu " "issued fibsize=%d\n", actual_fibsize, actual_fibsize64, user_srbcmd->sg.count, sizeof(struct 
aac_srb), sizeof(struct sgentry), sizeof(struct sgentry64), fibsize)); rcode = -EINVAL; goto cleanup; } if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) { dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n")); rcode = -EINVAL; goto cleanup; } byte_count = 0; if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) { struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg; struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg; /* * This should also catch if user used the 32 bit sgmap */ if (actual_fibsize64 == fibsize) { actual_fibsize = actual_fibsize64; for (i = 0; i < upsg->count; i++) { u64 addr; void* p; if (upsg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } /* Does this really need to be GFP_DMA? */ p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", upsg->sg[i].count,i,upsg->count)); rcode = -ENOMEM; goto cleanup; } addr = (u64)upsg->sg[i].addr[0]; addr += ((u64)upsg->sg[i].addr[1]) << 32; sg_user[i] = (void __user *)(uintptr_t)addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); byte_count += upsg->sg[i].count; psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); } } else { struct user_sgmap* usg; usg = kmalloc(actual_fibsize - sizeof(struct aac_srb) + sizeof(struct sgmap), GFP_KERNEL); if (!usg) { dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n")); rcode = -ENOMEM; goto cleanup; } memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb) + 
sizeof(struct sgmap)); actual_fibsize = actual_fibsize64; for (i = 0; i < usg->count; i++) { u64 addr; void* p; if (usg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { kfree(usg); rcode = -EINVAL; goto cleanup; } /* Does this really need to be GFP_DMA? */ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", usg->sg[i].count,i,usg->count)); kfree(usg); rcode = -ENOMEM; goto cleanup; } sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){ kfree (usg); dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff); psg->sg[i].addr[1] = cpu_to_le32(addr>>32); byte_count += usg->sg[i].count; psg->sg[i].count = cpu_to_le32(usg->sg[i].count); } kfree (usg); } srbcmd->count = cpu_to_le32(byte_count); psg->count = cpu_to_le32(sg_indx+1); status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); } else { struct user_sgmap* upsg = &user_srbcmd->sg; struct sgmap* psg = &srbcmd->sg; if (actual_fibsize64 == fibsize) { struct user_sgmap64* usg = (struct user_sgmap64 *)upsg; for (i = 0; i < upsg->count; i++) { uintptr_t addr; void* p; if (usg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } /* Does this really need to be GFP_DMA? 
*/ p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA); if(!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", usg->sg[i].count,i,usg->count)); rcode = -ENOMEM; goto cleanup; } addr = (u64)usg->sg[i].addr[0]; addr += ((u64)usg->sg[i].addr[1]) << 32; sg_user[i] = (void __user *)addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p,sg_user[i],usg->sg[i].count)){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir); psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff); byte_count += usg->sg[i].count; psg->sg[i].count = cpu_to_le32(usg->sg[i].count); } } else { for (i = 0; i < upsg->count; i++) { dma_addr_t addr; void* p; if (upsg->sg[i].count > ((dev->adapter_info.options & AAC_OPT_NEW_COMM) ? (dev->scsi_host_ptr->max_sectors << 9) : 65536)) { rcode = -EINVAL; goto cleanup; } p = kmalloc(upsg->sg[i].count, GFP_KERNEL); if (!p) { dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n", upsg->sg[i].count, i, upsg->count)); rcode = -ENOMEM; goto cleanup; } sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr; sg_list[i] = p; // save so we can clean up later sg_indx = i; if (flags & SRB_DataOut) { if(copy_from_user(p, sg_user[i], upsg->sg[i].count)) { dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n")); rcode = -EFAULT; goto cleanup; } } addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir); psg->sg[i].addr = cpu_to_le32(addr); byte_count += upsg->sg[i].count; psg->sg[i].count = cpu_to_le32(upsg->sg[i].count); } } srbcmd->count = cpu_to_le32(byte_count); psg->count = cpu_to_le32(sg_indx+1); status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } if (status == -ERESTARTSYS) { rcode = -ERESTARTSYS; goto cleanup; } if (status != 0){ 
dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n")); rcode = -ENXIO; goto cleanup; } if (flags & SRB_DataIn) { for(i = 0 ; i <= sg_indx; i++){ byte_count = le32_to_cpu( (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count : srbcmd->sg.sg[i].count); if(copy_to_user(sg_user[i], sg_list[i], byte_count)){ dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n")); rcode = -EFAULT; goto cleanup; } } } reply = (struct aac_srb_reply *) fib_data(srbfib); if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){ dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n")); rcode = -EFAULT; goto cleanup; } cleanup: kfree(user_srbcmd); for(i=0; i <= sg_indx; i++){ kfree(sg_list[i]); } if (rcode != -ERESTARTSYS) { aac_fib_complete(srbfib); aac_fib_free(srbfib); } return rcode; } struct aac_pci_info { u32 bus; u32 slot; }; static int aac_get_pci_info(struct aac_dev* dev, void __user *arg) { struct aac_pci_info pci_info; pci_info.bus = dev->pdev->bus->number; pci_info.slot = PCI_SLOT(dev->pdev->devfn); if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) { dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n")); return -EFAULT; } return 0; } int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg) { int status; /* * HBA gets first crack */ status = aac_dev_ioctl(dev, cmd, arg); if (status != -ENOTTY) return status; switch (cmd) { case FSACTL_MINIPORT_REV_CHECK: status = check_revision(dev, arg); break; case FSACTL_SEND_LARGE_FIB: case FSACTL_SENDFIB: status = ioctl_send_fib(dev, arg); break; case FSACTL_OPEN_GET_ADAPTER_FIB: status = open_getadapter_fib(dev, arg); break; case FSACTL_GET_NEXT_ADAPTER_FIB: status = next_getadapter_fib(dev, arg); break; case FSACTL_CLOSE_GET_ADAPTER_FIB: status = close_getadapter_fib(dev, arg); break; case FSACTL_SEND_RAW_SRB: status = aac_send_raw_srb(dev,arg); break; case FSACTL_GET_PCI_INFO: status = aac_get_pci_info(dev,arg); break; default: status 
= -ENOTTY; break; } return status; }
./CrossVul/dataset_final_sorted/CWE-20/c/good_5799_0
crossvul-cpp_data_good_1821_2
/* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/i386/kernel/signal.c" * Copyright (C) 1991, 1992 Linus Torvalds * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/elf.h> #include <linux/ptrace.h> #include <linux/ratelimit.h> #include <asm/sigcontext.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/unistd.h> #include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/vdso.h> #include <asm/switch_to.h> #include <asm/tm.h> #include "signal.h" #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) #define FP_REGS_SIZE sizeof(elf_fpregset_t) #define TRAMP_TRACEBACK 3 #define TRAMP_SIZE 6 /* * When we have signals to deliver, we set up on the user stack, * going down from the original stack pointer: * 1) a rt_sigframe struct which contains the ucontext * 2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller * frame for the signal handler. 
*/
struct rt_sigframe {
	/* sys_rt_sigreturn requires the ucontext be the first field */
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	unsigned long _unused[2];
	unsigned int tramp[TRAMP_SIZE];
	struct siginfo __user *pinfo;
	void __user *puc;
	struct siginfo info;
	/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
	char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));

static const char fmt32[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n";
static const char fmt64[] = KERN_INFO \
	"%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";

/*
 * This computes a quad word aligned pointer inside the vmx_reserve array
 * element. For historical reasons sigcontext might not be quad word aligned,
 * but the location we write the VMX regs to must be. See the comment in
 * sigcontext for more detail.
 */
#ifdef CONFIG_ALTIVEC
static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
{
	return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
}
#endif

/*
 * Set up the sigcontext for the signal frame.
 */

static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
		 int signr, sigset_t *set, unsigned long handler,
		 int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
		 * contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
	flush_fp_to_thread(current);
	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		err |= copy_vsx_to_user(v_regs, current);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_reg) contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	err |= __put_user(&sc->gp_regs, &sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |=  __put_user(set->sig[0], &sc->oldmask);

	return err;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state.  If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct pt_regs *regs,
				 int signr, sigset_t *set, unsigned long handler)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

	BUG_ON(!MSR_TM_ACTIVE(regs->msr));

	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state.  This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	flush_fp_to_thread(current);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.transact_vr,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.vr_state,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_reg contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(current->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);
	if (msr & MSR_FP)
		err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
	else
		err |= copy_fpr_to_user(&tm_sc->fp_regs, current);

#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace.  Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;

		err |= copy_vsx_to_user(v_regs, current);

		if (msr & MSR_VSX)
			err |= copy_transact_vsx_to_user(tm_v_regs, current);
		else
			err |= copy_vsx_to_user(tm_v_regs, current);

		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_reg) contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */

	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->gp_regs,
			      &current->thread.ckpt_regs, GP_REGS_SIZE);
	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |=  __put_user(set->sig[0], &sc->oldmask);

	return err;
}
#endif

/*
 * Restore the sigcontext from the signal frame.
 */

static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
			      struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs;
#endif
	unsigned long err = 0;
	unsigned long save_r13 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/* If this is not a signal return, we preserve the TLS in r13 */
	if (!sig)
		save_r13 = regs->gpr[13];

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
	err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
	err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
	err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
	/* skip SOFTE */
	regs->trap = 0;
	err |=
__get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	if (!sig)
		regs->gpr[13] = save_r13;
	if (set != NULL)
		err |= __get_user(set->sig[0], &sc->oldmask);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into current->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	if (err)
		return err;
	/* 34 * sizeof(vector128) covers vr0..31, vscr, and the slot at
	 * v_regs[33] whose low word holds VRSAVE.
	 */
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && (msr & MSR_VEC) != 0)
		err |= __copy_from_user(&current->thread.vr_state, v_regs,
					33 * sizeof(vector128));
	else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
	/* Always get VRSAVE back */
	if (v_regs != NULL)
		err |= __get_user(current->thread.vrsave,
				  (u32 __user *)&v_regs[33]);
	else
		current->thread.vrsave = 0;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(current, &sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	v_regs += ELF_NVRREG;
	if ((msr & MSR_VSX) != 0)
		err |= copy_vsx_from_user(current, v_regs);
	else
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif
	return err;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional processes.
 */
static long restore_tm_sigcontexts(struct pt_regs *regs,
				   struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
	unsigned long err = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif
	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
	err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
				sizeof(regs->gpr));

	/*
	 * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
	 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
	 * Users doing anything abhorrent like thread-switching w/ signals for
	 * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
	 * For the case of getting a signal and simply returning from it,
	 * we don't need to re-copy them here.
	 */
	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
	err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	/* Don't allow reserved mode.
 */
	/* Reject a user-supplied MSR with the reserved TS encoding; letting
	 * it through would recheckpoint into an invalid transactional state.
	 */
	if (MSR_TM_RESV(msr))
		return -EINVAL;

	/* pull in MSR TM from user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);

	/* pull in MSR LE from user context */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
	err |= __get_user(current->thread.ckpt_regs.ctr,
			  &sc->gp_regs[PT_CTR]);
	err |= __get_user(current->thread.ckpt_regs.link,
			  &sc->gp_regs[PT_LNK]);
	err |= __get_user(current->thread.ckpt_regs.xer,
			  &sc->gp_regs[PT_XER]);
	err |= __get_user(current->thread.ckpt_regs.ccr,
			  &sc->gp_regs[PT_CCR]);

	/* These regs are not checkpointed; they can go in 'regs'. */
	err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	/*
	 * Do this before updating the thread state in
	 * current->thread.fpr/vr.  That way, if we get preempted
	 * and another task grabs the FPU/Altivec, it won't be
	 * tempted to save the current CPU state into the thread_struct
	 * and corrupt what we are writing there.
	 */
	discard_lazy_cpu_state();

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into current->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	err |= __get_user(tm_v_regs, &tm_sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	if (tm_v_regs && !access_ok(VERIFY_READ,
				tm_v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(&current->thread.vr_state, v_regs,
					33 * sizeof(vector128));
		err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
					33 * sizeof(vector128));
	}
	else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
		memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL && tm_v_regs != NULL) {
		err |= __get_user(current->thread.vrsave,
				  (u32 __user *)&v_regs[33]);
		err |= __get_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	}
	else {
		current->thread.vrsave = 0;
		current->thread.transact_vrsave = 0;
	}
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(current, &sc->fp_regs);
	err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data.  Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	/* NOTE(review): only v_regs is NULL-checked here although tm_v_regs
	 * is advanced and dereferenced too -- confirm tm_v_regs is guaranteed
	 * non-NULL whenever MSR_VSX is set in the user-supplied msr.
	 */
	if (v_regs && ((msr & MSR_VSX) != 0)) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;
		err |= copy_vsx_from_user(current, v_regs);
		err |= copy_transact_vsx_from_user(current, tm_v_regs);
	} else {
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
	}
#endif
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		regs->msr |= MSR_VEC;
	}
#endif

	return err;
}
#endif

/*
 * Setup the trampoline code on the stack
 */
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
	int i;
	long err = 0;

	/* addi r1, r1, __SIGNAL_FRAMESIZE  # Pop the dummy stackframe */
	err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
	/* li r0, __NR_[rt_]sigreturn| */
	err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]);
	/* sc */
	err |= __put_user(0x44000002UL, &tramp[2]);

	/* Minimal traceback info */
	for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++)
		err |= __put_user(0, &tramp[i]);

	/* Keep the instruction cache coherent with the code just written. */
	if (!err)
		flush_icache_range((unsigned long) &tramp[0],
			   (unsigned long) &tramp[TRAMP_SIZE]);

	return err;
}

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - 32*sizeof(long))

/*
 * Handle {get,set,swap}_context operations
 */
int sys_swapcontext(struct ucontext __user *old_ctx,
		    struct ucontext __user *new_ctx,
		    long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
{
	unsigned char tmp;
	sigset_t set;
	unsigned long new_msr = 0;
	int ctx_has_vsx_region = 0;

	if (new_ctx &&
	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
		return -EFAULT;
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context state sets the MSR VSX bits but
	 * it doesn't provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;

	if (old_ctx != NULL) {
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0,
					ctx_has_vsx_region)
		    || __copy_to_user(&old_ctx->uc_sigmask,
				      &current->blocked, sizeof(sigset_t)))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	/* Probe the first and last byte of the new context so a fault in
	 * the middle of the restore below is the out-of-memory / concurrent
	 * unmap case described in the comment that follows.
	 */
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
	    || __get_user(tmp, (u8 __user *) new_ctx)
	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
		do_exit(SIGSEGV);
	set_current_blocked(&set);
	if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
		do_exit(SIGSEGV);

	/* This returns like rt_sigreturn */
	set_thread_flag(TIF_RESTOREALL);
	return 0;
}


/*
 * Do a signal return; undo the signal stack.
 */

int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	unsigned long msr;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;
		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	}
	else
	/* Fall through, for non-TM restore */
#endif
	if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
		goto badframe;

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ?
fmt64 : fmt32,
				   current->comm, current->pid, "rt_sigreturn",
				   (long)uc, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}

int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;

	frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 0);
	if (unlikely(frame == NULL))
		goto badframe;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	if (err)
		goto badframe;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    regs, ksig->sig,
					    NULL,
					    (unsigned long)ksig->ka.sa.sa_handler);
	} else
#endif
	{
		err |= __put_user(0, &frame->uc.uc_link);
		err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, ksig->sig,
					NULL, (unsigned long)ksig->ka.sa.sa_handler,
					1);
	}
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	current->thread.fp_state.fpscr = 0;

	/* Set up to return from userspace. */
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
	} else {
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs->link = (unsigned long) &frame->tramp[0];
	}

	/* Allocate a dummy caller frame for the signal handler.
	 */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	/* Write the old stack pointer as the back chain of the dummy frame. */
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler. */
	if (is_elf2_task()) {
		regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
		regs->gpr[12] = regs->nip;
	} else {
		/* Handler is *really* a pointer to the function descriptor for
		 * the signal routine.  The first entry in the function
		 * descriptor is the entry address of signal and the second
		 * entry is the TOC value we need to use.
		 */
		func_descr_t __user *funct_desc_ptr =
			(func_descr_t __user *) ksig->ka.sa.sa_handler;

		err |= get_user(regs->nip, &funct_desc_ptr->entry);
		err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
	}

	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->result = 0;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
		err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
		regs->gpr[6] = (unsigned long) frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ?
				   fmt64 : fmt32,
				   current->comm, current->pid, "setup_rt_frame",
				   (long)frame, regs->nip, regs->link);

	return 1;
}
./CrossVul/dataset_final_sorted/CWE-20/c/good_1821_2
crossvul-cpp_data_good_1821_1
/* * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * Copyright (C) 2001 IBM * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) * * Derived from "arch/i386/kernel/signal.c" * Copyright (C) 1991, 1992 Linus Torvalds * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/elf.h> #include <linux/ptrace.h> #include <linux/ratelimit.h> #ifdef CONFIG_PPC64 #include <linux/syscalls.h> #include <linux/compat.h> #else #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #endif #include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/syscalls.h> #include <asm/sigcontext.h> #include <asm/vdso.h> #include <asm/switch_to.h> #include <asm/tm.h> #ifdef CONFIG_PPC64 #include "ppc32.h" #include <asm/unistd.h> #else #include <asm/ucontext.h> #include <asm/pgtable.h> #endif #include "signal.h" #ifdef CONFIG_PPC64 #define sys_rt_sigreturn compat_sys_rt_sigreturn #define sys_swapcontext compat_sys_swapcontext #define sys_sigreturn compat_sys_sigreturn #define old_sigaction old_sigaction32 #define sigcontext sigcontext32 #define mcontext mcontext32 #define ucontext ucontext32 #define __save_altstack __compat_save_altstack /* * Userspace code may pass a ucontext which doesn't include VSX added * at the end. We need to check for this case. 
*/ #define UCONTEXTSIZEWITHOUTVSX \ (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32)) /* * Returning 0 means we return to userspace via * ret_from_except and thus restore all user * registers from *regs. This is what we need * to do when a signal has been delivered. */ #define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32)) #undef __SIGNAL_FRAMESIZE #define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32 #undef ELF_NVRREG #define ELF_NVRREG ELF_NVRREG32 /* * Functions for flipping sigsets (thanks to brain dead generic * implementation that makes things simple for little endian only) */ static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) { compat_sigset_t cset; switch (_NSIG_WORDS) { case 4: cset.sig[6] = set->sig[3] & 0xffffffffull; cset.sig[7] = set->sig[3] >> 32; case 3: cset.sig[4] = set->sig[2] & 0xffffffffull; cset.sig[5] = set->sig[2] >> 32; case 2: cset.sig[2] = set->sig[1] & 0xffffffffull; cset.sig[3] = set->sig[1] >> 32; case 1: cset.sig[0] = set->sig[0] & 0xffffffffull; cset.sig[1] = set->sig[0] >> 32; } return copy_to_user(uset, &cset, sizeof(*uset)); } static inline int get_sigset_t(sigset_t *set, const compat_sigset_t __user *uset) { compat_sigset_t s32; if (copy_from_user(&s32, uset, sizeof(*uset))) return -EFAULT; /* * Swap the 2 words of the 64-bit sigset_t (they are stored * in the "wrong" endian in 32-bit user storage). 
*/ switch (_NSIG_WORDS) { case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32); case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32); case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32); case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); } return 0; } #define to_user_ptr(p) ptr_to_compat(p) #define from_user_ptr(p) compat_ptr(p) static inline int save_general_regs(struct pt_regs *regs, struct mcontext __user *frame) { elf_greg_t64 *gregs = (elf_greg_t64 *)regs; int i; WARN_ON(!FULL_REGS(regs)); for (i = 0; i <= PT_RESULT; i ++) { if (i == 14 && !FULL_REGS(regs)) i = 32; if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i])) return -EFAULT; } return 0; } static inline int restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr) { elf_greg_t64 *gregs = (elf_greg_t64 *)regs; int i; for (i = 0; i <= PT_RESULT; i++) { if ((i == PT_MSR) || (i == PT_SOFTE)) continue; if (__get_user(gregs[i], &sr->mc_gregs[i])) return -EFAULT; } return 0; } #else /* CONFIG_PPC64 */ #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set) { return copy_to_user(uset, set, sizeof(*uset)); } static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset) { return copy_from_user(set, uset, sizeof(*uset)); } #define to_user_ptr(p) ((unsigned long)(p)) #define from_user_ptr(p) ((void __user *)(p)) static inline int save_general_regs(struct pt_regs *regs, struct mcontext __user *frame) { WARN_ON(!FULL_REGS(regs)); return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE); } static inline int restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr) { /* copy up to but not including MSR */ if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t))) return -EFAULT; /* copy from orig_r3 (the word after the MSR) up to the end */ if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3], GP_REGS_SIZE - PT_ORIG_R3 * 
sizeof(elf_greg_t)))
		return -EFAULT;

	return 0;
}
#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represent all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};

#ifdef CONFIG_VSX
/* With VSX the 64-bit FPRs live in fp_state; these helpers pack/unpack
 * the 32 FPRs plus fpscr into a 33-double user buffer.
 */
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	buf[i] = task->thread.fp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	task->thread.fp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Same as above but for the transactional (speculative) register set. */
unsigned long copy_transact_fpr_to_user(void __user *to,
				  struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_TRANS_FPR(i);
	buf[i] = task->thread.transact_fp.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_transact_fpr_from_user(struct task_struct *task,
					  void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_TRANS_FPR(i) = buf[i];
	task->thread.transact_fp.fpscr = buf[i];

	return 0;
}

unsigned long copy_transact_vsx_to_user(void __user *to,
				  struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_transact_vsx_from_user(struct task_struct *task,
					  void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
/* Without VSX the FPR array layout matches the user frame exactly, so a
 * straight copy suffices.
 */
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
			      ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_transact_fpr_to_user(void __user *to,
					 struct task_struct *task)
{
	return __copy_to_user(to, task->thread.transact_fp.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
						 void __user *from)
{
	return __copy_from_user(task->thread.transact_fp.fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		__giveup_vsx(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li r0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret)
{
	unsigned long msr = regs->msr;

	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state.  This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.transact_vr,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.transact_vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		__giveup_vsx(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
						      current))
				return 1;
		} else {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
*/ if (current->thread.used_spe) { flush_spe_to_thread(current); if (__copy_to_user(&frame->mc_vregs, current->thread.evr, ELF_NEVRREG * sizeof(u32))) return 1; /* set MSR_SPE in the saved MSR value to indicate that * frame->mc_vregs contains valid data */ msr |= MSR_SPE; } /* We always copy to/from spefscr */ if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG)) return 1; #endif /* CONFIG_SPE */ if (__put_user(msr, &frame->mc_gregs[PT_MSR])) return 1; if (sigret) { /* Set up the sigreturn trampoline: li r0,sigret; sc */ if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) || __put_user(0x44000002UL, &frame->tramp[1])) return 1; flush_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[2]); } return 0; } #endif /* * Restore the current user register values from the user stack, * (except for MSR). */ static long restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig) { long err; unsigned int save_r2 = 0; unsigned long msr; #ifdef CONFIG_VSX int i; #endif /* * restore general registers but not including MSR or SOFTE. Also * take care of keeping r2 (TLS) intact if not a signal */ if (!sig) save_r2 = (unsigned int)regs->gpr[2]; err = restore_general_regs(regs, sr); regs->trap = 0; err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); if (!sig) regs->gpr[2] = (unsigned long) save_r2; if (err) return 1; /* if doing signal return, restore the previous little-endian mode */ if (sig) regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); /* * Do this before updating the thread state in * current->thread.fpr/vr/evr. That way, if we get preempted * and another task grabs the FPU/Altivec/SPE, it won't be * tempted to save the current CPU state into the thread_struct * and corrupt what we are writing there. 
*/ discard_lazy_cpu_state(); #ifdef CONFIG_ALTIVEC /* * Force the process to reload the altivec registers from * current->thread when it next does altivec instructions */ regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { /* restore altivec registers from the stack */ if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, sizeof(sr->mc_vregs))) return 1; } else if (current->thread.used_vr) memset(&current->thread.vr_state, 0, ELF_NVRREG * sizeof(vector128)); /* Always get VRSAVE back */ if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) return 1; if (cpu_has_feature(CPU_FTR_ALTIVEC)) mtspr(SPRN_VRSAVE, current->thread.vrsave); #endif /* CONFIG_ALTIVEC */ if (copy_fpr_from_user(current, &sr->mc_fregs)) return 1; #ifdef CONFIG_VSX /* * Force the process to reload the VSX registers from * current->thread when it next does VSX instruction. */ regs->msr &= ~MSR_VSX; if (msr & MSR_VSX) { /* * Restore altivec registers from the stack to a local * buffer, then write this out to the thread_struct */ if (copy_vsx_from_user(current, &sr->mc_vsregs)) return 1; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; #endif /* CONFIG_VSX */ /* * force the process to reload the FP registers from * current->thread when it next does FP instructions */ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); #ifdef CONFIG_SPE /* force the process to reload the spe registers from current->thread when it next does spe instructions */ regs->msr &= ~MSR_SPE; if (msr & MSR_SPE) { /* restore spe registers from the stack */ if (__copy_from_user(current->thread.evr, &sr->mc_vregs, ELF_NEVRREG * sizeof(u32))) return 1; } else if (current->thread.used_spe) memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); /* Always get SPEFSCR back */ if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG)) return 1; #endif /* CONFIG_SPE */ return 0; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM /* * Restore the 
current user register values from the user stack, except for * MSR, and recheckpoint the original checkpointed register state for processes * in transactions. */ static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr, struct mcontext __user *tm_sr) { long err; unsigned long msr, msr_hi; #ifdef CONFIG_VSX int i; #endif /* * restore general registers but not including MSR or SOFTE. Also * take care of keeping r2 (TLS) intact if not a signal. * See comment in signal_64.c:restore_tm_sigcontexts(); * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR * were set by the signal delivery. */ err = restore_general_regs(regs, tm_sr); err |= restore_general_regs(&current->thread.ckpt_regs, sr); err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]); err |= __get_user(msr, &sr->mc_gregs[PT_MSR]); if (err) return 1; /* Restore the previous little-endian mode */ regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); /* * Do this before updating the thread state in * current->thread.fpr/vr/evr. That way, if we get preempted * and another task grabs the FPU/Altivec/SPE, it won't be * tempted to save the current CPU state into the thread_struct * and corrupt what we are writing there. 
*/ discard_lazy_cpu_state(); #ifdef CONFIG_ALTIVEC regs->msr &= ~MSR_VEC; if (msr & MSR_VEC) { /* restore altivec registers from the stack */ if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs, sizeof(sr->mc_vregs)) || __copy_from_user(&current->thread.transact_vr, &tm_sr->mc_vregs, sizeof(sr->mc_vregs))) return 1; } else if (current->thread.used_vr) { memset(&current->thread.vr_state, 0, ELF_NVRREG * sizeof(vector128)); memset(&current->thread.transact_vr, 0, ELF_NVRREG * sizeof(vector128)); } /* Always get VRSAVE back */ if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]) || __get_user(current->thread.transact_vrsave, (u32 __user *)&tm_sr->mc_vregs[32])) return 1; if (cpu_has_feature(CPU_FTR_ALTIVEC)) mtspr(SPRN_VRSAVE, current->thread.vrsave); #endif /* CONFIG_ALTIVEC */ regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); if (copy_fpr_from_user(current, &sr->mc_fregs) || copy_transact_fpr_from_user(current, &tm_sr->mc_fregs)) return 1; #ifdef CONFIG_VSX regs->msr &= ~MSR_VSX; if (msr & MSR_VSX) { /* * Restore altivec registers from the stack to a local * buffer, then write this out to the thread_struct */ if (copy_vsx_from_user(current, &sr->mc_vsregs) || copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs)) return 1; } else if (current->thread.used_vsr) for (i = 0; i < 32 ; i++) { current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0; current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0; } #endif /* CONFIG_VSX */ #ifdef CONFIG_SPE /* SPE regs are not checkpointed with TM, so this section is * simply the same as in restore_user_regs(). 
*/ regs->msr &= ~MSR_SPE; if (msr & MSR_SPE) { if (__copy_from_user(current->thread.evr, &sr->mc_vregs, ELF_NEVRREG * sizeof(u32))) return 1; } else if (current->thread.used_spe) memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32)); /* Always get SPEFSCR back */ if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG)) return 1; #endif /* CONFIG_SPE */ /* Get the top half of the MSR from the user context */ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR])) return 1; msr_hi <<= 32; /* If TM bits are set to the reserved value, it's an invalid context */ if (MSR_TM_RESV(msr_hi)) return 1; /* Pull in the MSR TM bits from the user context */ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK); /* Now, recheckpoint. This loads up all of the checkpointed (older) * registers, including FP and V[S]Rs. After recheckpointing, the * transactional versions should be loaded. */ tm_enable(); /* Make sure the transaction is marked as failed */ current->thread.tm_texasr |= TEXASR_FS; /* This loads the checkpointed FP/VEC state, if used */ tm_recheckpoint(&current->thread, msr); /* This loads the speculative FP/VEC state, if used */ if (msr & MSR_FP) { do_load_up_transact_fpu(&current->thread); regs->msr |= (MSR_FP | current->thread.fpexc_mode); } #ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { do_load_up_transact_altivec(&current->thread); regs->msr |= MSR_VEC; } #endif return 0; } #endif #ifdef CONFIG_PPC64 int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s) { int err; if (!access_ok (VERIFY_WRITE, d, sizeof(*d))) return -EFAULT; /* If you change siginfo_t structure, please be sure * this code is fixed accordingly. * It should never copy any pad contained in the structure * to avoid security leaks, but must copy the generic * 3 ints plus the relevant union member. * This routine must convert siginfo from 64bit to 32bit as well * at the same time. 
*/ err = __put_user(s->si_signo, &d->si_signo); err |= __put_user(s->si_errno, &d->si_errno); err |= __put_user((short)s->si_code, &d->si_code); if (s->si_code < 0) err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad, SI_PAD_SIZE32); else switch(s->si_code >> 16) { case __SI_CHLD >> 16: err |= __put_user(s->si_pid, &d->si_pid); err |= __put_user(s->si_uid, &d->si_uid); err |= __put_user(s->si_utime, &d->si_utime); err |= __put_user(s->si_stime, &d->si_stime); err |= __put_user(s->si_status, &d->si_status); break; case __SI_FAULT >> 16: err |= __put_user((unsigned int)(unsigned long)s->si_addr, &d->si_addr); break; case __SI_POLL >> 16: err |= __put_user(s->si_band, &d->si_band); err |= __put_user(s->si_fd, &d->si_fd); break; case __SI_TIMER >> 16: err |= __put_user(s->si_tid, &d->si_tid); err |= __put_user(s->si_overrun, &d->si_overrun); err |= __put_user(s->si_int, &d->si_int); break; case __SI_SYS >> 16: err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr); err |= __put_user(s->si_syscall, &d->si_syscall); err |= __put_user(s->si_arch, &d->si_arch); break; case __SI_RT >> 16: /* This is not generated by the kernel as of now. */ case __SI_MESGQ >> 16: err |= __put_user(s->si_int, &d->si_int); /* fallthrough */ case __SI_KILL >> 16: default: err |= __put_user(s->si_pid, &d->si_pid); err |= __put_user(s->si_uid, &d->si_uid); break; } return err; } #define copy_siginfo_to_user copy_siginfo_to_user32 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) { if (copy_from_user(to, from, 3*sizeof(int)) || copy_from_user(to->_sifields._pad, from->_sifields._pad, SI_PAD_SIZE32)) return -EFAULT; return 0; } #endif /* CONFIG_PPC64 */ /* * Set up a signal frame for a "real-time" signal handler * (one which gets siginfo). 
*/ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs) { struct rt_sigframe __user *rt_sf; struct mcontext __user *frame; struct mcontext __user *tm_frame = NULL; void __user *addr; unsigned long newsp = 0; int sigret; unsigned long tramp; /* Set up Signal Frame */ /* Put a Real Time Context onto stack */ rt_sf = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*rt_sf), 1); addr = rt_sf; if (unlikely(rt_sf == NULL)) goto badframe; /* Put the siginfo & fill in most of the ucontext */ if (copy_siginfo_to_user(&rt_sf->info, &ksig->info) || __put_user(0, &rt_sf->uc.uc_flags) || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1]) || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext), &rt_sf->uc.uc_regs) || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset)) goto badframe; /* Save user registers on the stack */ frame = &rt_sf->uc.uc_mcontext; addr = frame; if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { sigret = 0; tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp; } else { sigret = __NR_rt_sigreturn; tramp = (unsigned long) frame->tramp; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_frame = &rt_sf->uc_transact.uc_mcontext; if (MSR_TM_ACTIVE(regs->msr)) { if (__put_user((unsigned long)&rt_sf->uc_transact, &rt_sf->uc.uc_link) || __put_user((unsigned long)tm_frame, &rt_sf->uc_transact.uc_regs)) goto badframe; if (save_tm_user_regs(regs, frame, tm_frame, sigret)) goto badframe; } else #endif { if (__put_user(0, &rt_sf->uc.uc_link)) goto badframe; if (save_user_regs(regs, frame, tm_frame, sigret, 1)) goto badframe; } regs->link = tramp; current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ /* create a stack frame for the caller of the handler */ newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16); addr = (void __user *)regs->gpr[1]; if (put_user(regs->gpr[1], (u32 __user *)newsp)) goto badframe; /* Fill registers for signal handler */ regs->gpr[1] = newsp; regs->gpr[3] = ksig->sig; regs->gpr[4] = (unsigned long) 
&rt_sf->info; regs->gpr[5] = (unsigned long) &rt_sf->uc; regs->gpr[6] = (unsigned long) rt_sf; regs->nip = (unsigned long) ksig->ka.sa.sa_handler; /* enter the signal handler in native-endian mode */ regs->msr &= ~MSR_LE; regs->msr |= (MSR_KERNEL & MSR_LE); return 0; badframe: if (show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, addr, regs->nip, regs->link); return 1; } static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig) { sigset_t set; struct mcontext __user *mcp; if (get_sigset_t(&set, &ucp->uc_sigmask)) return -EFAULT; #ifdef CONFIG_PPC64 { u32 cmcp; if (__get_user(cmcp, &ucp->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; /* no need to check access_ok(mcp), since mcp < 4GB */ } #else if (__get_user(mcp, &ucp->uc_regs)) return -EFAULT; if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp))) return -EFAULT; #endif set_current_blocked(&set); if (restore_user_regs(regs, mcp, sig)) return -EFAULT; return 0; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM static int do_setcontext_tm(struct ucontext __user *ucp, struct ucontext __user *tm_ucp, struct pt_regs *regs) { sigset_t set; struct mcontext __user *mcp; struct mcontext __user *tm_mcp; u32 cmcp; u32 tm_cmcp; if (get_sigset_t(&set, &ucp->uc_sigmask)) return -EFAULT; if (__get_user(cmcp, &ucp->uc_regs) || __get_user(tm_cmcp, &tm_ucp->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; tm_mcp = (struct mcontext __user *)(u64)tm_cmcp; /* no need to check access_ok(mcp), since mcp < 4GB */ set_current_blocked(&set); if (restore_tm_user_regs(regs, mcp, tm_mcp)) return -EFAULT; return 0; } #endif long sys_swapcontext(struct ucontext __user *old_ctx, struct ucontext __user *new_ctx, int ctx_size, int r6, int r7, int r8, struct pt_regs *regs) { unsigned char tmp; int ctx_has_vsx_region = 0; #ifdef CONFIG_PPC64 unsigned long new_msr = 0; if (new_ctx) { struct mcontext 
__user *mcp; u32 cmcp; /* * Get pointer to the real mcontext. No need for * access_ok since we are dealing with compat * pointers. */ if (__get_user(cmcp, &new_ctx->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR])) return -EFAULT; } /* * Check that the context is not smaller than the original * size (with VMX but without VSX) */ if (ctx_size < UCONTEXTSIZEWITHOUTVSX) return -EINVAL; /* * If the new context state sets the MSR VSX bits but * it doesn't provide VSX state. */ if ((ctx_size < sizeof(struct ucontext)) && (new_msr & MSR_VSX)) return -EINVAL; /* Does the context have enough room to store VSX data? */ if (ctx_size >= sizeof(struct ucontext)) ctx_has_vsx_region = 1; #else /* Context size is for future use. Right now, we only make sure * we are passed something we understand */ if (ctx_size < sizeof(struct ucontext)) return -EINVAL; #endif if (old_ctx != NULL) { struct mcontext __user *mctx; /* * old_ctx might not be 16-byte aligned, in which * case old_ctx->uc_mcontext won't be either. * Because we have the old_ctx->uc_pad2 field * before old_ctx->uc_mcontext, we need to round down * from &old_ctx->uc_mcontext to a 16-byte boundary. */ mctx = (struct mcontext __user *) ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL); if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size) || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region) || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked) || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs)) return -EFAULT; } if (new_ctx == NULL) return 0; if (!access_ok(VERIFY_READ, new_ctx, ctx_size) || __get_user(tmp, (u8 __user *) new_ctx) || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1)) return -EFAULT; /* * If we get a fault copying the context into the kernel's * image of the user's registers, we can't just return -EFAULT * because the user's registers will be corrupted. 
For instance * the NIP value may have been updated but not some of the * other registers. Given that we have done the access_ok * and successfully read the first and last bytes of the region * above, this should only happen in an out-of-memory situation * or if another thread unmaps the region containing the context. * We kill the task with a SIGSEGV in this situation. */ if (do_setcontext(new_ctx, regs, 0)) do_exit(SIGSEGV); set_thread_flag(TIF_RESTOREALL); return 0; } long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, struct pt_regs *regs) { struct rt_sigframe __user *rt_sf; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM struct ucontext __user *uc_transact; unsigned long msr_hi; unsigned long tmp; int tm_restore = 0; #endif /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; rt_sf = (struct rt_sigframe __user *) (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16); if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf))) goto bad; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (__get_user(tmp, &rt_sf->uc.uc_link)) goto bad; uc_transact = (struct ucontext __user *)(uintptr_t)tmp; if (uc_transact) { u32 cmcp; struct mcontext __user *mcp; if (__get_user(cmcp, &uc_transact->uc_regs)) return -EFAULT; mcp = (struct mcontext __user *)(u64)cmcp; /* The top 32 bits of the MSR are stashed in the transactional * ucontext. */ if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR])) goto bad; if (MSR_TM_ACTIVE(msr_hi<<32)) { /* We only recheckpoint on return if we're * transaction. */ tm_restore = 1; if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs)) goto bad; } } if (!tm_restore) /* Fall through, for non-TM restore */ #endif if (do_setcontext(&rt_sf->uc, regs, 1)) goto bad; /* * It's not clear whether or why it is desirable to save the * sigaltstack setting on signal delivery and restore it on * signal return. But other architectures do this and we have * always done it up until now so it is probably better not to * change it. 
-- paulus */ #ifdef CONFIG_PPC64 if (compat_restore_altstack(&rt_sf->uc.uc_stack)) goto bad; #else if (restore_altstack(&rt_sf->uc.uc_stack)) goto bad; #endif set_thread_flag(TIF_RESTOREALL); return 0; bad: if (show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, rt_sf, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; } #ifdef CONFIG_PPC32 int sys_debug_setcontext(struct ucontext __user *ctx, int ndbg, struct sig_dbg_op __user *dbg, int r6, int r7, int r8, struct pt_regs *regs) { struct sig_dbg_op op; int i; unsigned char tmp; unsigned long new_msr = regs->msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS unsigned long new_dbcr0 = current->thread.debug.dbcr0; #endif for (i=0; i<ndbg; i++) { if (copy_from_user(&op, dbg + i, sizeof(op))) return -EFAULT; switch (op.dbg_type) { case SIG_DBG_SINGLE_STEPPING: #ifdef CONFIG_PPC_ADV_DEBUG_REGS if (op.dbg_value) { new_msr |= MSR_DE; new_dbcr0 |= (DBCR0_IDM | DBCR0_IC); } else { new_dbcr0 &= ~DBCR0_IC; if (!DBCR_ACTIVE_EVENTS(new_dbcr0, current->thread.debug.dbcr1)) { new_msr &= ~MSR_DE; new_dbcr0 &= ~DBCR0_IDM; } } #else if (op.dbg_value) new_msr |= MSR_SE; else new_msr &= ~MSR_SE; #endif break; case SIG_DBG_BRANCH_TRACING: #ifdef CONFIG_PPC_ADV_DEBUG_REGS return -EINVAL; #else if (op.dbg_value) new_msr |= MSR_BE; else new_msr &= ~MSR_BE; #endif break; default: return -EINVAL; } } /* We wait until here to actually install the values in the registers so if we fail in the above loop, it will not affect the contents of these registers. After this point, failure is a problem, anyway, and it's very unlikely unless the user is really doing something wrong. 
*/ regs->msr = new_msr; #ifdef CONFIG_PPC_ADV_DEBUG_REGS current->thread.debug.dbcr0 = new_dbcr0; #endif if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) || __get_user(tmp, (u8 __user *) ctx) || __get_user(tmp, (u8 __user *) (ctx + 1) - 1)) return -EFAULT; /* * If we get a fault copying the context into the kernel's * image of the user's registers, we can't just return -EFAULT * because the user's registers will be corrupted. For instance * the NIP value may have been updated but not some of the * other registers. Given that we have done the access_ok * and successfully read the first and last bytes of the region * above, this should only happen in an out-of-memory situation * or if another thread unmaps the region containing the context. * We kill the task with a SIGSEGV in this situation. */ if (do_setcontext(ctx, regs, 1)) { if (show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in " "sys_debug_setcontext: %p nip %08lx " "lr %08lx\n", current->comm, current->pid, ctx, regs->nip, regs->link); force_sig(SIGSEGV, current); goto out; } /* * It's not clear whether or why it is desirable to save the * sigaltstack setting on signal delivery and restore it on * signal return. But other architectures do this and we have * always done it up until now so it is probably better not to * change it. 
-- paulus */ restore_altstack(&ctx->uc_stack); set_thread_flag(TIF_RESTOREALL); out: return 0; } #endif /* * OK, we're invoking a handler */ int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs) { struct sigcontext __user *sc; struct sigframe __user *frame; struct mcontext __user *tm_mctx = NULL; unsigned long newsp = 0; int sigret; unsigned long tramp; /* Set up Signal Frame */ frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 1); if (unlikely(frame == NULL)) goto badframe; sc = (struct sigcontext __user *) &frame->sctx; #if _NSIG != 64 #error "Please adjust handle_signal()" #endif if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler) || __put_user(oldset->sig[0], &sc->oldmask) #ifdef CONFIG_PPC64 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3]) #else || __put_user(oldset->sig[1], &sc->_unused[3]) #endif || __put_user(to_user_ptr(&frame->mctx), &sc->regs) || __put_user(ksig->sig, &sc->signal)) goto badframe; if (vdso32_sigtramp && current->mm->context.vdso_base) { sigret = 0; tramp = current->mm->context.vdso_base + vdso32_sigtramp; } else { sigret = __NR_sigreturn; tramp = (unsigned long) frame->mctx.tramp; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM tm_mctx = &frame->mctx_transact; if (MSR_TM_ACTIVE(regs->msr)) { if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact, sigret)) goto badframe; } else #endif { if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1)) goto badframe; } regs->link = tramp; current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */ /* create a stack frame for the caller of the handler */ newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE; if (put_user(regs->gpr[1], (u32 __user *)newsp)) goto badframe; regs->gpr[1] = newsp; regs->gpr[3] = ksig->sig; regs->gpr[4] = (unsigned long) sc; regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler; /* enter the signal handler in big-endian mode */ regs->msr &= ~MSR_LE; return 0; badframe: if 
(show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in handle_signal32: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, frame, regs->nip, regs->link); return 1; } /* * Do a signal return; undo the signal stack. */ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, struct pt_regs *regs) { struct sigframe __user *sf; struct sigcontext __user *sc; struct sigcontext sigctx; struct mcontext __user *sr; void __user *addr; sigset_t set; #ifdef CONFIG_PPC_TRANSACTIONAL_MEM struct mcontext __user *mcp, *tm_mcp; unsigned long msr_hi; #endif /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn = do_no_restart_syscall; sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); sc = &sf->sctx; addr = sc; if (copy_from_user(&sigctx, sc, sizeof(sigctx))) goto badframe; #ifdef CONFIG_PPC64 /* * Note that PPC32 puts the upper 32 bits of the sigmask in the * unused part of the signal stackframe */ set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32); #else set.sig[0] = sigctx.oldmask; set.sig[1] = sigctx._unused[3]; #endif set_current_blocked(&set); #ifdef CONFIG_PPC_TRANSACTIONAL_MEM mcp = (struct mcontext __user *)&sf->mctx; tm_mcp = (struct mcontext __user *)&sf->mctx_transact; if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR])) goto badframe; if (MSR_TM_ACTIVE(msr_hi<<32)) { if (!cpu_has_feature(CPU_FTR_TM)) goto badframe; if (restore_tm_user_regs(regs, mcp, tm_mcp)) goto badframe; } else #endif { sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); addr = sr; if (!access_ok(VERIFY_READ, sr, sizeof(*sr)) || restore_user_regs(regs, sr, 1)) goto badframe; } set_thread_flag(TIF_RESTOREALL); return 0; badframe: if (show_unhandled_signals) printk_ratelimited(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: " "%p nip %08lx lr %08lx\n", current->comm, current->pid, addr, regs->nip, regs->link); force_sig(SIGSEGV, current); return 0; }
./CrossVul/dataset_final_sorted/CWE-20/c/good_1821_1
crossvul-cpp_data_good_2976_0
/* * Glue code for optimized assembly version of Salsa20. * * Copyright (c) 2007 Tan Swee Heng <thesweeheng@gmail.com> * * The assembly codes are public domain assembly codes written by Daniel. J. * Bernstein <djb@cr.yp.to>. The codes are modified to include indentation * and to remove extraneous comments and functions that are not needed. * - i586 version, renamed as salsa20-i586-asm_32.S * available from <http://cr.yp.to/snuffle/salsa20/x86-pm/salsa20.s> * - x86-64 version, renamed as salsa20-x86_64-asm_64.S * available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/algapi.h> #include <linux/module.h> #include <linux/crypto.h> #define SALSA20_IV_SIZE 8U #define SALSA20_MIN_KEY_SIZE 16U #define SALSA20_MAX_KEY_SIZE 32U struct salsa20_ctx { u32 input[16]; }; asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 keysize, u32 ivsize); asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv); asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, const u8 *src, u8 *dst, u32 bytes); static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keysize) { struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8); return 0; } static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct blkcipher_walk walk; struct crypto_blkcipher *tfm = desc->tfm; struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); int err; blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt_block(desc, &walk, 64); salsa20_ivsetup(ctx, walk.iv); while (walk.nbytes >= 64) { salsa20_encrypt_bytes(ctx, walk.src.virt.addr, walk.dst.virt.addr, 
walk.nbytes - (walk.nbytes % 64)); err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64); } if (walk.nbytes) { salsa20_encrypt_bytes(ctx, walk.src.virt.addr, walk.dst.virt.addr, walk.nbytes); err = blkcipher_walk_done(desc, &walk, 0); } return err; } static struct crypto_alg alg = { .cra_name = "salsa20", .cra_driver_name = "salsa20-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_type = &crypto_blkcipher_type, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct salsa20_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_u = { .blkcipher = { .setkey = setkey, .encrypt = encrypt, .decrypt = encrypt, .min_keysize = SALSA20_MIN_KEY_SIZE, .max_keysize = SALSA20_MAX_KEY_SIZE, .ivsize = SALSA20_IV_SIZE, } } }; static int __init init(void) { return crypto_register_alg(&alg); } static void __exit fini(void) { crypto_unregister_alg(&alg); } module_init(init); module_exit(fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)"); MODULE_ALIAS_CRYPTO("salsa20"); MODULE_ALIAS_CRYPTO("salsa20-asm");
./CrossVul/dataset_final_sorted/CWE-20/c/good_2976_0
crossvul-cpp_data_bad_1284_2
/*
** 2008 August 18
**
** The author disclaims copyright to this source code.  In place of
** a legal notice, here is a blessing:
**
**    May you do good and not evil.
**    May you find forgiveness for yourself and forgive others.
**    May you share freely, never taking more than you give.
**
*************************************************************************
**
** This file contains routines used for walking the parser tree and
** resolve all identifiers by associating them with a particular
** table and column.
*/
#include "sqliteInt.h"

/*
** Walk the expression tree pExpr and increase the aggregate function
** depth (the Expr.op2 field) by N on every TK_AGG_FUNCTION node.
** This needs to occur when copying a TK_AGG_FUNCTION node from an
** outer query into an inner subquery.
**
** incrAggFunctionDepth(pExpr,n) is the main routine.  incrAggDepth(..)
** is a helper function - a callback for the tree walker.
*/
static int incrAggDepth(Walker *pWalker, Expr *pExpr){
  /* Only aggregate-function nodes carry a subquery depth in op2 */
  if( pExpr->op==TK_AGG_FUNCTION ) pExpr->op2 += pWalker->u.n;
  return WRC_Continue;
}
static void incrAggFunctionDepth(Expr *pExpr, int N){
  /* N<=0 means the expression is not moving into a deeper subquery,
  ** so there is nothing to adjust. */
  if( N>0 ){
    Walker w;
    memset(&w, 0, sizeof(w));
    w.xExprCallback = incrAggDepth;
    w.u.n = N;
    sqlite3WalkExpr(&w, pExpr);
  }
}

/*
** Turn the pExpr expression into an alias for the iCol-th column of the
** result set in pEList.
**
** If the reference is followed by a COLLATE operator, then make sure
** the COLLATE operator is preserved.  For example:
**
**     SELECT a+b, c+d FROM t1 ORDER BY 1 COLLATE nocase;
**
** Should be transformed into:
**
**     SELECT a+b, c+d FROM t1 ORDER BY (a+b) COLLATE nocase;
**
** The nSubquery parameter specifies how many levels of subquery the
** alias is removed from the original expression.  The usual value is
** zero but it might be more if the alias is contained within a subquery
** of the original expression.  The Expr.op2 field of TK_AGG_FUNCTION
** structures must be increased by the nSubquery amount.
*/
static void resolveAlias(
  Parse *pParse,         /* Parsing context */
  ExprList *pEList,      /* A result set */
  int iCol,              /* A column in the result set.  0..pEList->nExpr-1 */
  Expr *pExpr,           /* Transform this into an alias to the result set */
  const char *zType,     /* "GROUP" or "ORDER" or "" */
  int nSubquery          /* Number of subqueries that the label is moving */
){
  Expr *pOrig;           /* The iCol-th column of the result set */
  Expr *pDup;            /* Copy of pOrig */
  sqlite3 *db;           /* The database connection */

  assert( iCol>=0 && iCol<pEList->nExpr );
  pOrig = pEList->a[iCol].pExpr;
  assert( pOrig!=0 );
  db = pParse->db;
  pDup = sqlite3ExprDup(db, pOrig, 0);
  if( pDup!=0 ){
    /* GROUP BY aliases (zType[0]=='G') do not get their aggregate
    ** depth adjusted; ORDER BY and bare aliases do. */
    if( zType[0]!='G' ) incrAggFunctionDepth(pDup, nSubquery);
    if( pExpr->op==TK_COLLATE ){
      pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken);
    }

    /* Before calling sqlite3ExprDelete(), set the EP_Static flag. This
    ** prevents ExprDelete() from deleting the Expr structure itself,
    ** allowing it to be repopulated by the memcpy() on the following line.
    ** The pExpr->u.zToken might point into memory that will be freed by the
    ** sqlite3DbFree(db, pDup) on the last line of this block, so be sure to
    ** make a copy of the token before doing the sqlite3DbFree().
    */
    ExprSetProperty(pExpr, EP_Static);
    sqlite3ExprDelete(db, pExpr);
    memcpy(pExpr, pDup, sizeof(*pExpr));
    if( !ExprHasProperty(pExpr, EP_IntValue) && pExpr->u.zToken!=0 ){
      assert( (pExpr->flags & (EP_Reduced|EP_TokenOnly))==0 );
      pExpr->u.zToken = sqlite3DbStrDup(db, pExpr->u.zToken);
      pExpr->flags |= EP_MemToken;
    }
    if( ExprHasProperty(pExpr, EP_WinFunc) ){
      if( pExpr->y.pWin!=0 ){
        /* Re-point the window object at its new owner after the memcpy() */
        pExpr->y.pWin->pOwner = pExpr;
      }else{
        assert( db->mallocFailed );
      }
    }
    /* Only the Expr shell of pDup is released here; its contents now
    ** live on inside pExpr thanks to the memcpy() above. */
    sqlite3DbFree(db, pDup);
  }
  /* Tag the transformed node as an alias of a result-set column */
  ExprSetProperty(pExpr, EP_Alias);
}

/*
** Return TRUE if the name zCol occurs anywhere in the USING clause.
**
** Return FALSE if the USING clause is NULL or if it does not contain
** zCol.
*/ static int nameInUsingClause(IdList *pUsing, const char *zCol){ if( pUsing ){ int k; for(k=0; k<pUsing->nId; k++){ if( sqlite3StrICmp(pUsing->a[k].zName, zCol)==0 ) return 1; } } return 0; } /* ** Subqueries stores the original database, table and column names for their ** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN". ** Check to see if the zSpan given to this routine matches the zDb, zTab, ** and zCol. If any of zDb, zTab, and zCol are NULL then those fields will ** match anything. */ int sqlite3MatchSpanName( const char *zSpan, const char *zCol, const char *zTab, const char *zDb ){ int n; for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){ return 0; } zSpan += n+1; for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} if( zTab && (sqlite3StrNICmp(zSpan, zTab, n)!=0 || zTab[n]!=0) ){ return 0; } zSpan += n+1; if( zCol && sqlite3StrICmp(zSpan, zCol)!=0 ){ return 0; } return 1; } /* ** Return TRUE if the double-quoted string mis-feature should be supported. */ static int areDoubleQuotedStringsEnabled(sqlite3 *db, NameContext *pTopNC){ if( db->init.busy ) return 1; /* Always support for legacy schemas */ if( pTopNC->ncFlags & NC_IsDDL ){ /* Currently parsing a DDL statement */ if( sqlite3WritableSchema(db) && (db->flags & SQLITE_DqsDML)!=0 ){ return 1; } return (db->flags & SQLITE_DqsDDL)!=0; }else{ /* Currently parsing a DML statement */ return (db->flags & SQLITE_DqsDML)!=0; } } /* ** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up ** that name in the set of source tables in pSrcList and make the pExpr ** expression node refer back to that source column. The following changes ** are made to pExpr: ** ** pExpr->iDb Set the index in db->aDb[] of the database X ** (even if X is implied). ** pExpr->iTable Set to the cursor number for the table obtained ** from pSrcList. ** pExpr->y.pTab Points to the Table structure of X.Y (even if ** X and/or Y are implied.) 
**    pExpr->iColumn       Set to the column number within the table.
**    pExpr->op            Set to TK_COLUMN.
**    pExpr->pLeft         Any expression this points to is deleted
**    pExpr->pRight        Any expression this points to is deleted.
**
** The zDb variable is the name of the database (the "X").  This value may be
** NULL meaning that name is of the form Y.Z or Z.  Any available database
** can be used.  The zTable variable is the name of the table (the "Y").  This
** value can be NULL if zDb is also NULL.  If zTable is NULL it
** means that the form of the name is Z and that columns from any table
** can be used.
**
** If the name cannot be resolved unambiguously, leave an error message
** in pParse and return WRC_Abort.  Return WRC_Prune on success.
*/
static int lookupName(
  Parse *pParse,       /* The parsing context */
  const char *zDb,     /* Name of the database containing table, or NULL */
  const char *zTab,    /* Name of table containing column, or NULL */
  const char *zCol,    /* Name of the column. */
  NameContext *pNC,    /* The name context used to resolve the name */
  Expr *pExpr          /* Make this EXPR node point to the selected column */
){
  int i, j;                         /* Loop counters */
  int cnt = 0;                      /* Number of matching column names */
  int cntTab = 0;                   /* Number of matching table names */
  int nSubquery = 0;                /* How many levels of subquery */
  sqlite3 *db = pParse->db;         /* The database connection */
  struct SrcList_item *pItem;       /* Use for looping over pSrcList items */
  struct SrcList_item *pMatch = 0;  /* The matching pSrcList item */
  NameContext *pTopNC = pNC;        /* First namecontext in the list */
  Schema *pSchema = 0;              /* Schema of the expression */
  int eNewExprOp = TK_COLUMN;       /* New value for pExpr->op on success */
  Table *pTab = 0;                  /* Table hold the row */
  Column *pCol;                     /* A column of pTab */

  assert( pNC );     /* the name context cannot be NULL. */
  assert( zCol );    /* The Z in X.Y.Z cannot be NULL */
  assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );

  /* Initialize the node to no-match */
  pExpr->iTable = -1;
  ExprSetVVAProperty(pExpr, EP_NoReduce);

  /* Translate the schema name in zDb into a pointer to the corresponding
  ** schema.  If not found, pSchema will remain NULL and nothing will match
  ** resulting in an appropriate error message toward the end of this routine
  */
  if( zDb ){
    testcase( pNC->ncFlags & NC_PartIdx );
    testcase( pNC->ncFlags & NC_IsCheck );
    if( (pNC->ncFlags & (NC_PartIdx|NC_IsCheck))!=0 ){
      /* Silently ignore database qualifiers inside CHECK constraints and
      ** partial indices.  Do not raise errors because that might break
      ** legacy and because it does not hurt anything to just ignore the
      ** database name. */
      zDb = 0;
    }else{
      for(i=0; i<db->nDb; i++){
        assert( db->aDb[i].zDbSName );
        if( sqlite3StrICmp(db->aDb[i].zDbSName,zDb)==0 ){
          pSchema = db->aDb[i].pSchema;
          break;
        }
      }
    }
  }

  /* Start at the inner-most context and move outward until a match is found */
  assert( pNC && cnt==0 );
  do{
    ExprList *pEList;
    SrcList *pSrcList = pNC->pSrcList;

    if( pSrcList ){
      for(i=0, pItem=pSrcList->a; i<pSrcList->nSrc; i++, pItem++){
        pTab = pItem->pTab;
        assert( pTab!=0 && pTab->zName!=0 );
        assert( pTab->nCol>0 );
        if( pItem->pSelect && (pItem->pSelect->selFlags & SF_NestedFrom)!=0 ){
          int hit = 0;
          pEList = pItem->pSelect->pEList;
          for(j=0; j<pEList->nExpr; j++){
            if( sqlite3MatchSpanName(pEList->a[j].zSpan, zCol, zTab, zDb) ){
              cnt++;
              cntTab = 2;
              pMatch = pItem;
              pExpr->iColumn = j;
              hit = 1;
            }
          }
          if( hit || zTab==0 ) continue;
        }
        if( zDb && pTab->pSchema!=pSchema ){
          continue;
        }
        if( zTab ){
          const char *zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName;
          assert( zTabName!=0 );
          if( sqlite3StrICmp(zTabName, zTab)!=0 ){
            continue;
          }
          if( IN_RENAME_OBJECT && pItem->zAlias ){
            sqlite3RenameTokenRemap(pParse, 0, (void*)&pExpr->y.pTab);
          }
        }
        /* Remember the first table that matched by name */
        if( 0==(cntTab++) ){
          pMatch = pItem;
        }
        for(j=0, pCol=pTab->aCol; j<pTab->nCol; j++, pCol++){
          if( sqlite3StrICmp(pCol->zName, zCol)==0 ){
            /* If there has been exactly one prior match and this match
            ** is for the right-hand table of a NATURAL JOIN or is in a
            ** USING clause, then skip this match.
            */
            if( cnt==1 ){
              if( pItem->fg.jointype & JT_NATURAL ) continue;
              if( nameInUsingClause(pItem->pUsing, zCol) ) continue;
            }
            cnt++;
            pMatch = pItem;
            /* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */
            pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j;
            break;
          }
        }
      }
      if( pMatch ){
        pExpr->iTable = pMatch->iCursor;
        pExpr->y.pTab = pMatch->pTab;
        /* RIGHT JOIN not (yet) supported */
        assert( (pMatch->fg.jointype & JT_RIGHT)==0 );
        if( (pMatch->fg.jointype & JT_LEFT)!=0 ){
          ExprSetProperty(pExpr, EP_CanBeNull);
        }
        pSchema = pExpr->y.pTab->pSchema;
      }
    } /* if( pSrcList ) */

#if !defined(SQLITE_OMIT_TRIGGER) || !defined(SQLITE_OMIT_UPSERT)
    /* If we have not already resolved the name, then maybe
    ** it is a new.* or old.* trigger argument reference.  Or
    ** maybe it is an excluded.* from an upsert.
    */
    if( zDb==0 && zTab!=0 && cntTab==0 ){
      pTab = 0;
#ifndef SQLITE_OMIT_TRIGGER
      if( pParse->pTriggerTab!=0 ){
        int op = pParse->eTriggerOp;
        assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT );
        if( op!=TK_DELETE && sqlite3StrICmp("new",zTab) == 0 ){
          pExpr->iTable = 1;   /* iTable==1 means the "new" pseudo-table */
          pTab = pParse->pTriggerTab;
        }else if( op!=TK_INSERT && sqlite3StrICmp("old",zTab)==0 ){
          pExpr->iTable = 0;   /* iTable==0 means the "old" pseudo-table */
          pTab = pParse->pTriggerTab;
        }
      }
#endif /* SQLITE_OMIT_TRIGGER */
#ifndef SQLITE_OMIT_UPSERT
      if( (pNC->ncFlags & NC_UUpsert)!=0 ){
        Upsert *pUpsert = pNC->uNC.pUpsert;
        if( pUpsert && sqlite3StrICmp("excluded",zTab)==0 ){
          pTab = pUpsert->pUpsertSrc->a[0].pTab;
          pExpr->iTable = 2;   /* iTable==2 means the "excluded" pseudo-table */
        }
      }
#endif /* SQLITE_OMIT_UPSERT */

      if( pTab ){
        int iCol;
        pSchema = pTab->pSchema;
        cntTab++;
        for(iCol=0, pCol=pTab->aCol; iCol<pTab->nCol; iCol++, pCol++){
          if( sqlite3StrICmp(pCol->zName, zCol)==0 ){
            if( iCol==pTab->iPKey ){
              iCol = -1;
            }
            break;
          }
        }
        if( iCol>=pTab->nCol && sqlite3IsRowid(zCol) && VisibleRowid(pTab) ){
          /* IMP: R-51414-32910 */
          iCol = -1;
        }
        if( iCol<pTab->nCol ){
          cnt++;
#ifndef SQLITE_OMIT_UPSERT
          if( pExpr->iTable==2 ){
            testcase( iCol==(-1) );
            if( IN_RENAME_OBJECT ){
              pExpr->iColumn = iCol;
              pExpr->y.pTab = pTab;
              eNewExprOp = TK_COLUMN;
            }else{
              pExpr->iTable = pNC->uNC.pUpsert->regData + iCol;
              eNewExprOp = TK_REGISTER;
              ExprSetProperty(pExpr, EP_Alias);
            }
          }else
#endif /* SQLITE_OMIT_UPSERT */
          {
#ifndef SQLITE_OMIT_TRIGGER
            if( iCol<0 ){
              pExpr->affExpr = SQLITE_AFF_INTEGER;
            }else if( pExpr->iTable==0 ){
              testcase( iCol==31 );
              testcase( iCol==32 );
              pParse->oldmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<<iCol));
            }else{
              testcase( iCol==31 );
              testcase( iCol==32 );
              pParse->newmask |= (iCol>=32 ? 0xffffffff : (((u32)1)<<iCol));
            }
            pExpr->y.pTab = pTab;
            pExpr->iColumn = (i16)iCol;
            eNewExprOp = TK_TRIGGER;
#endif /* SQLITE_OMIT_TRIGGER */
          }
        }
      }
    }
#endif /* !defined(SQLITE_OMIT_TRIGGER) || !defined(SQLITE_OMIT_UPSERT) */

    /*
    ** Perhaps the name is a reference to the ROWID
    */
    if( cnt==0
     && cntTab==1
     && pMatch
     && (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0
     && sqlite3IsRowid(zCol)
     && VisibleRowid(pMatch->pTab)
    ){
      cnt = 1;
      pExpr->iColumn = -1;
      pExpr->affExpr = SQLITE_AFF_INTEGER;
    }

    /*
    ** If the input is of the form Z (not Y.Z or X.Y.Z) then the name Z
    ** might refer to an result-set alias.  This happens, for example, when
    ** we are resolving names in the WHERE clause of the following command:
    **
    **     SELECT a+b AS x FROM table WHERE x<10;
    **
    ** In cases like this, replace pExpr with a copy of the expression that
    ** forms the result set entry ("a+b" in the example) and return immediately.
    ** Note that the expression in the result set should have already been
    ** resolved by the time the WHERE clause is resolved.
    **
    ** The ability to use an output result-set column in the WHERE, GROUP BY,
    ** or HAVING clauses, or as part of a larger expression in the ORDER BY
    ** clause is not standard SQL.  This is a (goofy) SQLite extension, that
    ** is supported for backwards compatibility only.  Hence, we issue a warning
    ** on sqlite3_log() whenever the capability is used.
    */
    if( (pNC->ncFlags & NC_UEList)!=0
     && cnt==0
     && zTab==0
    ){
      pEList = pNC->uNC.pEList;
      assert( pEList!=0 );
      for(j=0; j<pEList->nExpr; j++){
        char *zAs = pEList->a[j].zName;
        if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){
          Expr *pOrig;
          assert( pExpr->pLeft==0 && pExpr->pRight==0 );
          assert( pExpr->x.pList==0 );
          assert( pExpr->x.pSelect==0 );
          pOrig = pEList->a[j].pExpr;
          if( (pNC->ncFlags&NC_AllowAgg)==0 && ExprHasProperty(pOrig, EP_Agg) ){
            sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs);
            return WRC_Abort;
          }
          if( (pNC->ncFlags&NC_AllowWin)==0 && ExprHasProperty(pOrig, EP_Win) ){
            sqlite3ErrorMsg(pParse, "misuse of aliased window function %s",zAs);
            return WRC_Abort;
          }
          if( sqlite3ExprVectorSize(pOrig)!=1 ){
            sqlite3ErrorMsg(pParse, "row value misused");
            return WRC_Abort;
          }
          resolveAlias(pParse, pEList, j, pExpr, "", nSubquery);
          cnt = 1;
          pMatch = 0;
          assert( zTab==0 && zDb==0 );
          if( IN_RENAME_OBJECT ){
            sqlite3RenameTokenRemap(pParse, 0, (void*)pExpr);
          }
          goto lookupname_end;
        }
      }
    }

    /* Advance to the next name context.  The loop will exit when either
    ** we have a match (cnt>0) or when we run out of name contexts.
    */
    if( cnt ) break;
    pNC = pNC->pNext;
    nSubquery++;
  }while( pNC );

  /*
  ** If X and Y are NULL (in other words if only the column name Z is
  ** supplied) and the value of Z is enclosed in double-quotes, then
  ** Z is a string literal if it doesn't match any column names.  In that
  ** case, we need to return right away and not make any changes to
  ** pExpr.
  **
  ** Because no reference was made to outer contexts, the pNC->nRef
  ** fields are not changed in any context.
  */
  if( cnt==0 && zTab==0 ){
    assert( pExpr->op==TK_ID );
    if( ExprHasProperty(pExpr,EP_DblQuoted)
     && areDoubleQuotedStringsEnabled(db, pTopNC)
    ){
      /* If a double-quoted identifier does not match any known column name,
      ** then treat it as a string.
      **
      ** This hack was added in the early days of SQLite in a misguided attempt
      ** to be compatible with MySQL 3.x, which used double-quotes for strings.
      ** I now sorely regret putting in this hack. The effect of this hack is
      ** that misspelled identifier names are silently converted into strings
      ** rather than causing an error, to the frustration of countless
      ** programmers. To all those frustrated programmers, my apologies.
      **
      ** Someday, I hope to get rid of this hack. Unfortunately there is
      ** a huge amount of legacy SQL that uses it. So for now, we just
      ** issue a warning.
      */
      sqlite3_log(SQLITE_WARNING,
        "double-quoted string literal: \"%w\"", zCol);
#ifdef SQLITE_ENABLE_NORMALIZE
      sqlite3VdbeAddDblquoteStr(db, pParse->pVdbe, zCol);
#endif
      pExpr->op = TK_STRING;
      pExpr->y.pTab = 0;
      return WRC_Prune;
    }
    if( sqlite3ExprIdToTrueFalse(pExpr) ){
      return WRC_Prune;
    }
  }

  /*
  ** cnt==0 means there was not match.  cnt>1 means there were two or
  ** more matches.  Either way, we have an error.
  */
  if( cnt!=1 ){
    const char *zErr;
    zErr = cnt==0 ? "no such column" : "ambiguous column name";
    if( zDb ){
      sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol);
    }else if( zTab ){
      sqlite3ErrorMsg(pParse, "%s: %s.%s", zErr, zTab, zCol);
    }else{
      sqlite3ErrorMsg(pParse, "%s: %s", zErr, zCol);
    }
    pParse->checkSchema = 1;
    pTopNC->nErr++;
  }

  /* If a column from a table in pSrcList is referenced, then record
  ** this fact in the pSrcList.a[].colUsed bitmask.  Column 0 causes
  ** bit 0 to be set.  Column 1 sets bit 1.  And so forth.  If the
  ** column number is greater than the number of bits in the bitmask
  ** then set the high-order bit of the bitmask.
  */
  if( pExpr->iColumn>=0 && pMatch!=0 ){
    int n = pExpr->iColumn;
    testcase( n==BMS-1 );
    if( n>=BMS ){
      n = BMS-1;
    }
    assert( pMatch->iCursor==pExpr->iTable );
    pMatch->colUsed |= ((Bitmask)1)<<n;
  }

  /* Clean up and return
  */
  sqlite3ExprDelete(db, pExpr->pLeft);
  pExpr->pLeft = 0;
  sqlite3ExprDelete(db, pExpr->pRight);
  pExpr->pRight = 0;
  pExpr->op = eNewExprOp;
  ExprSetProperty(pExpr, EP_Leaf);
lookupname_end:
  if( cnt==1 ){
    assert( pNC!=0 );
    if( !ExprHasProperty(pExpr, EP_Alias) ){
      sqlite3AuthRead(pParse, pExpr, pSchema, pNC->pSrcList);
    }
    /* Increment the nRef value on all name contexts from TopNC up to
    ** the point where the name matched. */
    for(;;){
      assert( pTopNC!=0 );
      pTopNC->nRef++;
      if( pTopNC==pNC ) break;
      pTopNC = pTopNC->pNext;
    }
    return WRC_Prune;
  } else {
    return WRC_Abort;
  }
}

/*
** Allocate and return a pointer to an expression to load the column iCol
** from datasource iSrc in SrcList pSrc.
*/
Expr *sqlite3CreateColumnExpr(sqlite3 *db, SrcList *pSrc, int iSrc, int iCol){
  Expr *p = sqlite3ExprAlloc(db, TK_COLUMN, 0, 0);
  if( p ){
    struct SrcList_item *pItem = &pSrc->a[iSrc];
    p->y.pTab = pItem->pTab;
    p->iTable = pItem->iCursor;
    if( p->y.pTab->iPKey==iCol ){
      /* INTEGER PRIMARY KEY is referenced as the rowid (column -1) */
      p->iColumn = -1;
    }else{
      p->iColumn = (ynVar)iCol;
      testcase( iCol==BMS );
      testcase( iCol==BMS-1 );
      pItem->colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol);
    }
  }
  return p;
}

/*
** Report an error that an expression is not valid for some set of
** pNC->ncFlags values determined by validMask.
*/
static void notValid(
  Parse *pParse,       /* Leave error message here */
  NameContext *pNC,    /* The name context */
  const char *zMsg,    /* Type of error */
  int validMask        /* Set of contexts for which prohibited */
){
  assert( (validMask&~(NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol))==0 );
  if( (pNC->ncFlags & validMask)!=0 ){
    const char *zIn = "partial index WHERE clauses";
    if( pNC->ncFlags & NC_IdxExpr )      zIn = "index expressions";
#ifndef SQLITE_OMIT_CHECK
    else if( pNC->ncFlags & NC_IsCheck ) zIn = "CHECK constraints";
#endif
#ifndef SQLITE_OMIT_GENERATED_COLUMNS
    else if( pNC->ncFlags & NC_GenCol ) zIn = "generated columns";
#endif
    sqlite3ErrorMsg(pParse, "%s prohibited in %s", zMsg, zIn);
  }
}

/*
** Expression p should encode a floating point value between 1.0 and 0.0.
** Return 134217728 (2^27) times this value.  Or return -1 if p is not a
** floating point value between 1.0 and 0.0.
*/
static int exprProbability(Expr *p){
  double r = -1.0;
  if( p->op!=TK_FLOAT ) return -1;
  sqlite3AtoF(p->u.zToken, &r, sqlite3Strlen30(p->u.zToken), SQLITE_UTF8);
  assert( r>=0.0 );
  if( r>1.0 ) return -1;
  return (int)(r*134217728.0);
}

/*
** This routine is callback for sqlite3WalkExpr().
**
** Resolve symbolic names into TK_COLUMN operators for the current
** node in the expression tree.  Return 0 to continue the search down
** the tree or 2 to abort the tree walk.
**
** This routine also does error checking and name resolution for
** function names.  The operator for aggregate functions is changed
** to TK_AGG_FUNCTION.
*/
static int resolveExprStep(Walker *pWalker, Expr *pExpr){
  NameContext *pNC;
  Parse *pParse;

  pNC = pWalker->u.pNC;
  assert( pNC!=0 );
  pParse = pNC->pParse;
  assert( pParse==pWalker->pParse );

#ifndef NDEBUG
  if( pNC->pSrcList && pNC->pSrcList->nAlloc>0 ){
    SrcList *pSrcList = pNC->pSrcList;
    int i;
    for(i=0; i<pNC->pSrcList->nSrc; i++){
      assert( pSrcList->a[i].iCursor>=0 && pSrcList->a[i].iCursor<pParse->nTab);
    }
  }
#endif
  switch( pExpr->op ){

#if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY)
    /* The special operator TK_ROW means use the rowid for the first
    ** column in the FROM clause.  This is used by the LIMIT and ORDER BY
    ** clause processing on UPDATE and DELETE statements.
    */
    case TK_ROW: {
      SrcList *pSrcList = pNC->pSrcList;
      struct SrcList_item *pItem;
      assert( pSrcList && pSrcList->nSrc==1 );
      pItem = pSrcList->a;
      assert( HasRowid(pItem->pTab) && pItem->pTab->pSelect==0 );
      pExpr->op = TK_COLUMN;
      pExpr->y.pTab = pItem->pTab;
      pExpr->iTable = pItem->iCursor;
      pExpr->iColumn = -1;
      pExpr->affExpr = SQLITE_AFF_INTEGER;
      break;
    }
#endif /* defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) */

    /* A column name:                    ID
    ** Or table name and column name:   ID.ID
    ** Or a database, table and column: ID.ID.ID
    **
    ** The TK_ID and TK_OUT cases are combined so that there will only
    ** be one call to lookupName().  Then the compiler will in-line
    ** lookupName() for a size reduction and performance increase.
    */
    case TK_ID:
    case TK_DOT: {
      const char *zColumn;
      const char *zTable;
      const char *zDb;
      Expr *pRight;

      if( pExpr->op==TK_ID ){
        zDb = 0;
        zTable = 0;
        zColumn = pExpr->u.zToken;
      }else{
        Expr *pLeft = pExpr->pLeft;
        notValid(pParse, pNC, "the \".\" operator", NC_IdxExpr|NC_GenCol);
        pRight = pExpr->pRight;
        if( pRight->op==TK_ID ){
          zDb = 0;
        }else{
          /* Three-part name: the left-most token is the database name */
          assert( pRight->op==TK_DOT );
          zDb = pLeft->u.zToken;
          pLeft = pRight->pLeft;
          pRight = pRight->pRight;
        }
        zTable = pLeft->u.zToken;
        zColumn = pRight->u.zToken;
        if( IN_RENAME_OBJECT ){
          sqlite3RenameTokenRemap(pParse, (void*)pExpr, (void*)pRight);
          sqlite3RenameTokenRemap(pParse, (void*)&pExpr->y.pTab, (void*)pLeft);
        }
      }
      return lookupName(pParse, zDb, zTable, zColumn, pNC, pExpr);
    }

    /* Resolve function names
    */
    case TK_FUNCTION: {
      ExprList *pList = pExpr->x.pList;    /* The argument list */
      int n = pList ? pList->nExpr : 0;    /* Number of arguments */
      int no_such_func = 0;       /* True if no such function exists */
      int wrong_num_args = 0;     /* True if wrong number of arguments */
      int is_agg = 0;             /* True if is an aggregate function */
      int nId;                    /* Number of characters in function name */
      const char *zId;            /* The function name. */
      FuncDef *pDef;              /* Information about the function */
      u8 enc = ENC(pParse->db);   /* The database encoding */
      /* NC_AllowAgg/NC_AllowWin are cleared while resolving an aggregate's
      ** arguments and restored afterward (see "pNC->ncFlags |= savedAllowFlags"
      ** below). */
      int savedAllowFlags = (pNC->ncFlags & (NC_AllowAgg | NC_AllowWin));
#ifndef SQLITE_OMIT_WINDOWFUNC
      Window *pWin = (IsWindowFunc(pExpr) ? pExpr->y.pWin : 0);
#endif
      assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
      zId = pExpr->u.zToken;
      nId = sqlite3Strlen30(zId);
      pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0);
      if( pDef==0 ){
        /* Retry with nArg==-2 to learn whether the name exists at all */
        pDef = sqlite3FindFunction(pParse->db, zId, -2, enc, 0);
        if( pDef==0 ){
          no_such_func = 1;
        }else{
          wrong_num_args = 1;
        }
      }else{
        is_agg = pDef->xFinalize!=0;
        if( pDef->funcFlags & SQLITE_FUNC_UNLIKELY ){
          ExprSetProperty(pExpr, EP_Unlikely);
          if( n==2 ){
            pExpr->iTable = exprProbability(pList->a[1].pExpr);
            if( pExpr->iTable<0 ){
              sqlite3ErrorMsg(pParse,
                "second argument to likelihood() must be a "
                "constant between 0.0 and 1.0");
              pNC->nErr++;
            }
          }else{
            /* EVIDENCE-OF: R-61304-29449 The unlikely(X) function is
            ** equivalent to likelihood(X, 0.0625).
            ** EVIDENCE-OF: R-01283-11636 The unlikely(X) function is
            ** short-hand for likelihood(X,0.0625).
            ** EVIDENCE-OF: R-36850-34127 The likely(X) function is short-hand
            ** for likelihood(X,0.9375).
            ** EVIDENCE-OF: R-53436-40973 The likely(X) function is equivalent
            ** to likelihood(X,0.9375). */
            /* TUNING: unlikely() probability is 0.0625.  likely() is 0.9375 */
            pExpr->iTable = pDef->zName[0]=='u' ? 8388608 : 125829120;
          }
        }
#ifndef SQLITE_OMIT_AUTHORIZATION
        {
          int auth = sqlite3AuthCheck(pParse, SQLITE_FUNCTION, 0,pDef->zName,0);
          if( auth!=SQLITE_OK ){
            if( auth==SQLITE_DENY ){
              sqlite3ErrorMsg(pParse, "not authorized to use function: %s",
                                      pDef->zName);
              pNC->nErr++;
            }
            pExpr->op = TK_NULL;
            return WRC_Prune;
          }
        }
#endif
        if( pDef->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG) ){
          /* For the purposes of the EP_ConstFunc flag, date and time
          ** functions and other functions that change slowly are considered
          ** constant because they are constant for the duration of one query.
          ** This allows them to be factored out of inner loops. */
          ExprSetProperty(pExpr,EP_ConstFunc);
        }
        if( (pDef->funcFlags & SQLITE_FUNC_CONSTANT)==0 ){
          /* Date/time functions that use 'now', and other functions like
          ** sqlite_version() that might change over time cannot be used
          ** in an index. */
          notValid(pParse, pNC, "non-deterministic functions",
                   NC_SelfRef);
        }else{
          assert( (NC_SelfRef & 0xff)==NC_SelfRef ); /* Must fit in 8 bits */
          pExpr->op2 = pNC->ncFlags & NC_SelfRef;
        }
        if( (pDef->funcFlags & SQLITE_FUNC_INTERNAL)!=0
         && pParse->nested==0
         && sqlite3Config.bInternalFunctions==0
        ){
          /* Internal-use-only functions are disallowed unless the
          ** SQL is being compiled using sqlite3NestedParse() */
          no_such_func = 1;
          pDef = 0;
        }else
        if( (pDef->funcFlags & SQLITE_FUNC_DIRECT)!=0
         && ExprHasProperty(pExpr, EP_Indirect)
         && !IN_RENAME_OBJECT
        ){
          /* Functions tagged with SQLITE_DIRECTONLY may not be used
          ** inside of triggers and views */
          sqlite3ErrorMsg(pParse, "%s() prohibited in triggers and views",
                          pDef->zName);
        }
      }

      if( 0==IN_RENAME_OBJECT ){
#ifndef SQLITE_OMIT_WINDOWFUNC
        assert( is_agg==0 || (pDef->funcFlags & SQLITE_FUNC_MINMAX)
          || (pDef->xValue==0 && pDef->xInverse==0)
          || (pDef->xValue && pDef->xInverse && pDef->xSFunc && pDef->xFinalize)
        );
        if( pDef && pDef->xValue==0 && pWin ){
          sqlite3ErrorMsg(pParse,
              "%.*s() may not be used as a window function", nId, zId
          );
          pNC->nErr++;
        }else if( (is_agg && (pNC->ncFlags & NC_AllowAgg)==0)
              || (is_agg && (pDef->funcFlags&SQLITE_FUNC_WINDOW) && !pWin)
              || (is_agg && pWin && (pNC->ncFlags & NC_AllowWin)==0)
        ){
          const char *zType;
          if( (pDef->funcFlags & SQLITE_FUNC_WINDOW) || pWin ){
            zType = "window";
          }else{
            zType = "aggregate";
          }
          sqlite3ErrorMsg(pParse, "misuse of %s function %.*s()",zType,nId,zId);
          pNC->nErr++;
          is_agg = 0;
        }
#else
        if( (is_agg && (pNC->ncFlags & NC_AllowAgg)==0) ){
          sqlite3ErrorMsg(pParse,"misuse of aggregate function %.*s()",nId,zId);
          pNC->nErr++;
          is_agg = 0;
        }
#endif
        else if( no_such_func && pParse->db->init.busy==0
#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
                  && pParse->explain==0
#endif
        ){
          sqlite3ErrorMsg(pParse, "no such function: %.*s", nId, zId);
          pNC->nErr++;
        }else if( wrong_num_args ){
          sqlite3ErrorMsg(pParse,"wrong number of arguments to function %.*s()",
               nId, zId);
          pNC->nErr++;
        }
#ifndef SQLITE_OMIT_WINDOWFUNC
        else if( is_agg==0 && ExprHasProperty(pExpr, EP_WinFunc) ){
          sqlite3ErrorMsg(pParse,
              "FILTER may not be used with non-aggregate %.*s()",
              nId, zId
          );
          pNC->nErr++;
        }
#endif
        if( is_agg ){
          /* Window functions may not be arguments of aggregate functions.
          ** Or arguments of other window functions. But aggregate functions
          ** may be arguments for window functions.  */
#ifndef SQLITE_OMIT_WINDOWFUNC
          pNC->ncFlags &= ~(NC_AllowWin | (!pWin ? NC_AllowAgg : 0));
#else
          pNC->ncFlags &= ~NC_AllowAgg;
#endif
        }
      }
#ifndef SQLITE_OMIT_WINDOWFUNC
      else if( ExprHasProperty(pExpr, EP_WinFunc) ){
        is_agg = 1;
      }
#endif
      sqlite3WalkExprList(pWalker, pList);
      if( is_agg ){
#ifndef SQLITE_OMIT_WINDOWFUNC
        if( pWin ){
          Select *pSel = pNC->pWinSelect;
          assert( pWin==pExpr->y.pWin );
          if( IN_RENAME_OBJECT==0 ){
            sqlite3WindowUpdate(pParse, pSel->pWinDefn, pWin, pDef);
          }
          sqlite3WalkExprList(pWalker, pWin->pPartition);
          sqlite3WalkExprList(pWalker, pWin->pOrderBy);
          sqlite3WalkExpr(pWalker, pWin->pFilter);
          sqlite3WindowLink(pSel, pWin);
          pNC->ncFlags |= NC_HasWin;
        }else
#endif /* SQLITE_OMIT_WINDOWFUNC */
        {
          NameContext *pNC2 = pNC;
          pExpr->op = TK_AGG_FUNCTION;
          pExpr->op2 = 0;
#ifndef SQLITE_OMIT_WINDOWFUNC
          if( ExprHasProperty(pExpr, EP_WinFunc) ){
            sqlite3WalkExpr(pWalker, pExpr->y.pWin->pFilter);
          }
#endif
          /* op2 counts how many name contexts outward the aggregate's
          ** source tables live */
          while( pNC2 && !sqlite3FunctionUsesThisSrc(pExpr, pNC2->pSrcList) ){
            pExpr->op2++;
            pNC2 = pNC2->pNext;
          }
          assert( pDef!=0 || IN_RENAME_OBJECT );
          if( pNC2 && pDef ){
            assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg );
            testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 );
            pNC2->ncFlags |= NC_HasAgg | (pDef->funcFlags & SQLITE_FUNC_MINMAX);
          }
        }
        pNC->ncFlags |= savedAllowFlags;
      }
      /* FIX ME:  Compute pExpr->affinity based on the expected return
      ** type of the function
      */
      return WRC_Prune;
    }
#ifndef SQLITE_OMIT_SUBQUERY
    case TK_SELECT:
    case TK_EXISTS:  testcase( pExpr->op==TK_EXISTS );
#endif
    case TK_IN: {
      testcase( pExpr->op==TK_IN );
      if( ExprHasProperty(pExpr, EP_xIsSelect) ){
        int nRef = pNC->nRef;
        notValid(pParse, pNC, "subqueries",
                 NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol);
        sqlite3WalkSelect(pWalker, pExpr->x.pSelect);
        assert( pNC->nRef>=nRef );
        if( nRef!=pNC->nRef ){
          /* The subquery referenced names from an outer context, so it is
          ** a correlated (variable) subquery */
          ExprSetProperty(pExpr, EP_VarSelect);
          pNC->ncFlags |= NC_VarSelect;
        }
      }
      break;
    }
    case TK_VARIABLE: {
      notValid(pParse, pNC, "parameters",
               NC_IsCheck|NC_PartIdx|NC_IdxExpr|NC_GenCol);
      break;
    }
    case TK_IS:
    case TK_ISNOT: {
      Expr *pRight = sqlite3ExprSkipCollateAndLikely(pExpr->pRight);
      assert( !ExprHasProperty(pExpr, EP_Reduced) );
      /* Handle special cases of "x IS TRUE", "x IS FALSE", "x IS NOT TRUE",
      ** and "x IS NOT FALSE". */
      if( pRight->op==TK_ID ){
        int rc = resolveExprStep(pWalker, pRight);
        if( rc==WRC_Abort ) return WRC_Abort;
        if( pRight->op==TK_TRUEFALSE ){
          pExpr->op2 = pExpr->op;
          pExpr->op = TK_TRUTH;
          return WRC_Continue;
        }
      }
      /* Fall thru */
    }
    case TK_BETWEEN:
    case TK_EQ:
    case TK_NE:
    case TK_LT:
    case TK_LE:
    case TK_GT:
    case TK_GE: {
      int nLeft, nRight;
      if( pParse->db->mallocFailed ) break;
      assert( pExpr->pLeft!=0 );
      nLeft = sqlite3ExprVectorSize(pExpr->pLeft);
      if( pExpr->op==TK_BETWEEN ){
        nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[0].pExpr);
        if( nRight==nLeft ){
          nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[1].pExpr);
        }
      }else{
        assert( pExpr->pRight!=0 );
        nRight = sqlite3ExprVectorSize(pExpr->pRight);
      }
      /* Both sides of a vector comparison must have the same dimension */
      if( nLeft!=nRight ){
        testcase( pExpr->op==TK_EQ );
        testcase( pExpr->op==TK_NE );
        testcase( pExpr->op==TK_LT );
        testcase( pExpr->op==TK_LE );
        testcase( pExpr->op==TK_GT );
        testcase( pExpr->op==TK_GE );
        testcase( pExpr->op==TK_IS );
        testcase( pExpr->op==TK_ISNOT );
        testcase( pExpr->op==TK_BETWEEN );
        sqlite3ErrorMsg(pParse, "row value misused");
      }
      break;
    }
  }
  return (pParse->nErr || pParse->db->mallocFailed) ? WRC_Abort : WRC_Continue;
}

/*
** pEList is a list of expressions which are really the result set of the
** a SELECT statement.  pE is a term in an ORDER BY or GROUP BY clause.
** This routine checks to see if pE is a simple identifier which corresponds
** to the AS-name of one of the terms of the expression list.  If it is,
** this routine return an integer between 1 and N where N is the number of
** elements in pEList, corresponding to the matching entry.  If there is
** no match, or if pE is not a simple identifier, then this routine
** return 0.
**
** pEList has been resolved.  pE has not.
*/
static int resolveAsName(
  Parse *pParse,     /* Parsing context for error messages */
  ExprList *pEList,  /* List of expressions to scan */
  Expr *pE           /* Expression we are trying to match */
){
  int i;             /* Loop counter */

  UNUSED_PARAMETER(pParse);

  if( pE->op==TK_ID ){
    char *zCol = pE->u.zToken;
    for(i=0; i<pEList->nExpr; i++){
      char *zAs = pEList->a[i].zName;
      if( zAs!=0 && sqlite3StrICmp(zAs, zCol)==0 ){
        return i+1;
      }
    }
  }
  return 0;
}

/*
** pE is a pointer to an expression which is a single term in the
** ORDER BY of a compound SELECT.  The expression has not been
** name resolved.
**
** At the point this routine is called, we already know that the
** ORDER BY term is not an integer index into the result set.  That
** case is handled by the calling routine.
**
** Attempt to match pE against result set columns in the left-most
** SELECT statement.  Return the index i of the matching column,
** as an indication to the caller that it should sort by the i-th column.
** The left-most column is 1.  In other words, the value returned is the
** same integer value that would be used in the SQL statement to indicate
** the column.
**
** If there is no match, return 0.  Return -1 if an error occurs.
*/ static int resolveOrderByTermToExprList( Parse *pParse, /* Parsing context for error messages */ Select *pSelect, /* The SELECT statement with the ORDER BY clause */ Expr *pE /* The specific ORDER BY term */ ){ int i; /* Loop counter */ ExprList *pEList; /* The columns of the result set */ NameContext nc; /* Name context for resolving pE */ sqlite3 *db; /* Database connection */ int rc; /* Return code from subprocedures */ u8 savedSuppErr; /* Saved value of db->suppressErr */ assert( sqlite3ExprIsInteger(pE, &i)==0 ); pEList = pSelect->pEList; /* Resolve all names in the ORDER BY term expression */ memset(&nc, 0, sizeof(nc)); nc.pParse = pParse; nc.pSrcList = pSelect->pSrc; nc.uNC.pEList = pEList; nc.ncFlags = NC_AllowAgg|NC_UEList; nc.nErr = 0; db = pParse->db; savedSuppErr = db->suppressErr; db->suppressErr = 1; rc = sqlite3ResolveExprNames(&nc, pE); db->suppressErr = savedSuppErr; if( rc ) return 0; /* Try to match the ORDER BY expression against an expression ** in the result set. Return an 1-based index of the matching ** result-set entry. */ for(i=0; i<pEList->nExpr; i++){ if( sqlite3ExprCompare(0, pEList->a[i].pExpr, pE, -1)<2 ){ return i+1; } } /* If no match, return 0. */ return 0; } /* ** Generate an ORDER BY or GROUP BY term out-of-range error. */ static void resolveOutOfRangeError( Parse *pParse, /* The error context into which to write the error */ const char *zType, /* "ORDER" or "GROUP" */ int i, /* The index (1-based) of the term out of range */ int mx /* Largest permissible value of i */ ){ sqlite3ErrorMsg(pParse, "%r %s BY term out of range - should be " "between 1 and %d", i, zType, mx); } /* ** Analyze the ORDER BY clause in a compound SELECT statement. Modify ** each term of the ORDER BY clause is a constant integer between 1 ** and N where N is the number of columns in the compound SELECT. ** ** ORDER BY terms that are already an integer between 1 and N are ** unmodified. 
ORDER BY terms that are integers outside the range of
** 1 through N generate an error.  ORDER BY terms that are expressions
** are matched against result set expressions of compound SELECT
** beginning with the left-most SELECT and working toward the right.
** At the first match, the ORDER BY expression is transformed into
** the integer column number.
**
** Return the number of errors seen.
*/
static int resolveCompoundOrderBy(
  Parse *pParse,        /* Parsing context.  Leave error messages here */
  Select *pSelect       /* The SELECT statement containing the ORDER BY */
){
  int i;
  ExprList *pOrderBy;
  ExprList *pEList;
  sqlite3 *db;
  int moreToDo = 1;

  pOrderBy = pSelect->pOrderBy;
  if( pOrderBy==0 ) return 0;
  db = pParse->db;
  if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){
    sqlite3ErrorMsg(pParse, "too many terms in ORDER BY clause");
    return 1;
  }
  /* Mark every ORDER BY term as not-yet-resolved */
  for(i=0; i<pOrderBy->nExpr; i++){
    pOrderBy->a[i].done = 0;
  }
  /* Build the pNext links so the loop below can walk the compound from
  ** the left-most SELECT toward the right */
  pSelect->pNext = 0;
  while( pSelect->pPrior ){
    pSelect->pPrior->pNext = pSelect;
    pSelect = pSelect->pPrior;
  }
  while( pSelect && moreToDo ){
    struct ExprList_item *pItem;
    moreToDo = 0;
    pEList = pSelect->pEList;
    assert( pEList!=0 );
    for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){
      int iCol = -1;
      Expr *pE, *pDup;
      if( pItem->done ) continue;
      pE = sqlite3ExprSkipCollateAndLikely(pItem->pExpr);
      if( sqlite3ExprIsInteger(pE, &iCol) ){
        if( iCol<=0 || iCol>pEList->nExpr ){
          resolveOutOfRangeError(pParse, "ORDER", i+1, pEList->nExpr);
          return 1;
        }
      }else{
        iCol = resolveAsName(pParse, pEList, pE);
        if( iCol==0 ){
          /* Now test if expression pE matches one of the values returned
          ** by pSelect.  In the usual case this is done by duplicating the
          ** expression, resolving any symbols in it, and then comparing
          ** it against each expression returned by the SELECT statement.
          ** Once the comparisons are finished, the duplicate expression
          ** is deleted.
          **
          ** Or, if this is running as part of an ALTER TABLE operation,
          ** resolve the symbols in the actual expression, not a duplicate.
          ** And, if one of the comparisons is successful, leave the
          ** expression as is instead of transforming it to an integer as in
          ** the usual case.  This allows the code in alter.c to modify
          ** column references within the ORDER BY expression as required. */
          if( IN_RENAME_OBJECT ){
            pDup = pE;
          }else{
            pDup = sqlite3ExprDup(db, pE, 0);
          }
          if( !db->mallocFailed ){
            assert(pDup);
            iCol = resolveOrderByTermToExprList(pParse, pSelect, pDup);
          }
          if( !IN_RENAME_OBJECT ){
            sqlite3ExprDelete(db, pDup);
          }
        }
      }
      if( iCol>0 ){
        /* Convert the ORDER BY term into an integer column number iCol,
        ** taking care to preserve the COLLATE clause if it exists */
        if( !IN_RENAME_OBJECT ){
          Expr *pNew = sqlite3Expr(db, TK_INTEGER, 0);
          if( pNew==0 ) return 1;
          pNew->flags |= EP_IntValue;
          pNew->u.iValue = iCol;
          if( pItem->pExpr==pE ){
            pItem->pExpr = pNew;
          }else{
            /* pE sits beneath one or more TK_COLLATE wrappers; splice the
            ** new integer in under the innermost COLLATE node */
            Expr *pParent = pItem->pExpr;
            assert( pParent->op==TK_COLLATE );
            while( pParent->pLeft->op==TK_COLLATE ) pParent = pParent->pLeft;
            assert( pParent->pLeft==pE );
            pParent->pLeft = pNew;
          }
          sqlite3ExprDelete(db, pE);
          pItem->u.x.iOrderByCol = (u16)iCol;
        }
        pItem->done = 1;
      }else{
        /* No match in this SELECT; try again on the next pass against the
        ** next SELECT of the compound */
        moreToDo = 1;
      }
    }
    pSelect = pSelect->pNext;
  }
  for(i=0; i<pOrderBy->nExpr; i++){
    if( pOrderBy->a[i].done==0 ){
      sqlite3ErrorMsg(pParse, "%r ORDER BY term does not match any "
            "column in the result set", i+1);
      return 1;
    }
  }
  return 0;
}

/*
** Check every term in the ORDER BY or GROUP BY clause pOrderBy of
** the SELECT statement pSelect.  If any term is reference to a
** result set expression (as determined by the ExprList.a.u.x.iOrderByCol
** field) then convert that term into a copy of the corresponding result set
** column.
**
** If any errors are detected, add an error message to pParse and
** return non-zero.  Return zero if no errors are seen.
*/
int sqlite3ResolveOrderGroupBy(
  Parse *pParse,        /* Parsing context.
Leave error messages here */
  Select *pSelect,      /* The SELECT statement containing the clause */
  ExprList *pOrderBy,   /* The ORDER BY or GROUP BY clause to be processed */
  const char *zType     /* "ORDER" or "GROUP" */
){
  int i;
  sqlite3 *db = pParse->db;
  ExprList *pEList;
  struct ExprList_item *pItem;

  if( pOrderBy==0 || pParse->db->mallocFailed || IN_RENAME_OBJECT ) return 0;
  if( pOrderBy->nExpr>db->aLimit[SQLITE_LIMIT_COLUMN] ){
    sqlite3ErrorMsg(pParse, "too many terms in %s BY clause", zType);
    return 1;
  }
  pEList = pSelect->pEList;
  assert( pEList!=0 );  /* sqlite3SelectNew() guarantees this */
  for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){
    if( pItem->u.x.iOrderByCol ){
      if( pItem->u.x.iOrderByCol>pEList->nExpr ){
        resolveOutOfRangeError(pParse, zType, i+1, pEList->nExpr);
        return 1;
      }
      /* Replace this term with a copy of the iOrderByCol-th result-set
      ** expression */
      resolveAlias(pParse, pEList, pItem->u.x.iOrderByCol-1, pItem->pExpr,
                   zType,0);
    }
  }
  return 0;
}

#ifndef SQLITE_OMIT_WINDOWFUNC
/*
** Walker callback for windowRemoveExprFromSelect().
*/
static int resolveRemoveWindowsCb(Walker *pWalker, Expr *pExpr){
  UNUSED_PARAMETER(pWalker);
  if( ExprHasProperty(pExpr, EP_WinFunc) ){
    Window *pWin = pExpr->y.pWin;
    sqlite3WindowUnlinkFromSelect(pWin);
  }
  return WRC_Continue;
}

/*
** Remove any Window objects owned by the expression pExpr from the
** Select.pWin list of Select object pSelect.
*/
static void windowRemoveExprFromSelect(Select *pSelect, Expr *pExpr){
  if( pSelect->pWin ){
    Walker sWalker;
    memset(&sWalker, 0, sizeof(Walker));
    sWalker.xExprCallback = resolveRemoveWindowsCb;
    sWalker.u.pSelect = pSelect;
    sqlite3WalkExpr(&sWalker, pExpr);
  }
}
#else
# define windowRemoveExprFromSelect(a, b)
#endif /* SQLITE_OMIT_WINDOWFUNC */

/*
** pOrderBy is an ORDER BY or GROUP BY clause in SELECT statement pSelect.
** The Name context of the SELECT statement is pNC.  zType is either
** "ORDER" or "GROUP" depending on which type of clause pOrderBy is.
**
** This routine resolves each term of the clause into an expression.
** If the order-by term is an integer I between 1 and N (where N is the
** number of columns in the result set of the SELECT) then the expression
** in the resolution is a copy of the I-th result-set expression.  If
** the order-by term is an identifier that corresponds to the AS-name of
** a result-set expression, then the term resolves to a copy of the
** result-set expression.  Otherwise, the expression is resolved in
** the usual way - using sqlite3ResolveExprNames().
**
** This routine returns the number of errors.  If errors occur, then
** an appropriate error message might be left in pParse.  (OOM errors
** excepted.)
*/
static int resolveOrderGroupBy(
  NameContext *pNC,     /* The name context of the SELECT statement */
  Select *pSelect,      /* The SELECT statement holding pOrderBy */
  ExprList *pOrderBy,   /* An ORDER BY or GROUP BY clause to resolve */
  const char *zType     /* Either "ORDER" or "GROUP", as appropriate */
){
  int i, j;                      /* Loop counters */
  int iCol;                      /* Column number */
  struct ExprList_item *pItem;   /* A term of the ORDER BY clause */
  Parse *pParse;                 /* Parsing context */
  int nResult;                   /* Number of terms in the result set */

  if( pOrderBy==0 ) return 0;
  nResult = pSelect->pEList->nExpr;
  pParse = pNC->pParse;
  for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){
    Expr *pE = pItem->pExpr;
    Expr *pE2 = sqlite3ExprSkipCollateAndLikely(pE);
    if( zType[0]!='G' ){
      /* AS-name matching applies to ORDER BY only, not GROUP BY */
      iCol = resolveAsName(pParse, pSelect->pEList, pE2);
      if( iCol>0 ){
        /* If an AS-name match is found, mark this ORDER BY column as being
        ** a copy of the iCol-th result-set column.  The subsequent call to
        ** sqlite3ResolveOrderGroupBy() will convert the expression to a
        ** copy of the iCol-th result-set expression. */
        pItem->u.x.iOrderByCol = (u16)iCol;
        continue;
      }
    }
    if( sqlite3ExprIsInteger(pE2, &iCol) ){
      /* The ORDER BY term is an integer constant.  Again, set the column
      ** number so that sqlite3ResolveOrderGroupBy() will convert the
      ** order-by term to a copy of the result-set expression */
      if( iCol<1 || iCol>0xffff ){
        resolveOutOfRangeError(pParse, zType, i+1, nResult);
        return 1;
      }
      pItem->u.x.iOrderByCol = (u16)iCol;
      continue;
    }

    /* Otherwise, treat the ORDER BY term as an ordinary expression */
    pItem->u.x.iOrderByCol = 0;
    if( sqlite3ResolveExprNames(pNC, pE) ){
      return 1;
    }
    for(j=0; j<pSelect->pEList->nExpr; j++){
      if( sqlite3ExprCompare(0, pE, pSelect->pEList->a[j].pExpr, -1)==0 ){
        /* Since this expresion is being changed into a reference
        ** to an identical expression in the result set, remove all Window
        ** objects belonging to the expression from the Select.pWin list. */
        windowRemoveExprFromSelect(pSelect, pE);
        pItem->u.x.iOrderByCol = j+1;
      }
    }
  }
  /* Convert every term flagged by iOrderByCol above into a copy of the
  ** corresponding result-set expression */
  return sqlite3ResolveOrderGroupBy(pParse, pSelect, pOrderBy, zType);
}

/*
** Resolve names in the SELECT statement p and all of its descendants.
*/
static int resolveSelectStep(Walker *pWalker, Select *p){
  NameContext *pOuterNC;  /* Context that contains this SELECT */
  NameContext sNC;        /* Name context of this SELECT */
  int isCompound;         /* True if p is a compound select */
  int nCompound;          /* Number of compound terms processed so far */
  Parse *pParse;          /* Parsing context */
  int i;                  /* Loop counter */
  ExprList *pGroupBy;     /* The GROUP BY clause */
  Select *pLeftmost;      /* Left-most of SELECT of a compound */
  sqlite3 *db;            /* Database connection */

  assert( p!=0 );
  if( p->selFlags & SF_Resolved ){
    return WRC_Prune;
  }
  pOuterNC = pWalker->u.pNC;
  pParse = pWalker->pParse;
  db = pParse->db;

  /* Normally sqlite3SelectExpand() will be called first and will have
  ** already expanded this SELECT.  However, if this is a subquery within
  ** an expression, sqlite3ResolveExprNames() will be called without a
  ** prior call to sqlite3SelectExpand().  When that happens, let
  ** sqlite3SelectPrep() do all of the processing for this SELECT.
** sqlite3SelectPrep() will invoke both sqlite3SelectExpand() and
  ** this routine in the correct order. */
  if( (p->selFlags & SF_Expanded)==0 ){
    sqlite3SelectPrep(pParse, p, pOuterNC);
    return (pParse->nErr || db->mallocFailed) ? WRC_Abort : WRC_Prune;
  }

  isCompound = p->pPrior!=0;
  nCompound = 0;
  pLeftmost = p;
  while( p ){
    assert( (p->selFlags & SF_Expanded)!=0 );
    assert( (p->selFlags & SF_Resolved)==0 );
    p->selFlags |= SF_Resolved;

    /* Resolve the expressions in the LIMIT and OFFSET clauses. These
    ** are not allowed to refer to any names, so pass an empty NameContext.
    */
    memset(&sNC, 0, sizeof(sNC));
    sNC.pParse = pParse;
    sNC.pWinSelect = p;
    if( sqlite3ResolveExprNames(&sNC, p->pLimit) ){
      return WRC_Abort;
    }

    /* If the SF_Converted flags is set, then this Select object was
    ** was created by the convertCompoundSelectToSubquery() function.
    ** In this case the ORDER BY clause (p->pOrderBy) should be resolved
    ** as if it were part of the sub-query, not the parent. This block
    ** moves the pOrderBy down to the sub-query. It will be moved back
    ** after the names have been resolved.  */
    if( p->selFlags & SF_Converted ){
      Select *pSub = p->pSrc->a[0].pSelect;
      assert( p->pSrc->nSrc==1 && p->pOrderBy );
      assert( pSub->pPrior && pSub->pOrderBy==0 );
      pSub->pOrderBy = p->pOrderBy;
      p->pOrderBy = 0;
    }

    /* Recursively resolve names in all subqueries */
    for(i=0; i<p->pSrc->nSrc; i++){
      struct SrcList_item *pItem = &p->pSrc->a[i];
      if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){
        NameContext *pNC;         /* Used to iterate name contexts */
        int nRef = 0;             /* Refcount for pOuterNC and outer contexts */
        const char *zSavedContext = pParse->zAuthContext;

        /* Count the total number of references to pOuterNC and all of its
        ** parent contexts. After resolving references to expressions in
        ** pItem->pSelect, check if this value has changed. If so, then
        ** SELECT statement pItem->pSelect must be correlated. Set the
        ** pItem->fg.isCorrelated flag if this is the case. */
        for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef += pNC->nRef;

        if( pItem->zName ) pParse->zAuthContext = pItem->zName;
        sqlite3ResolveSelectNames(pParse, pItem->pSelect, pOuterNC);
        pParse->zAuthContext = zSavedContext;
        if( pParse->nErr || db->mallocFailed ) return WRC_Abort;

        /* nRef becomes (old count - new count); it goes negative exactly
        ** when the subquery added references to outer contexts */
        for(pNC=pOuterNC; pNC; pNC=pNC->pNext) nRef -= pNC->nRef;
        assert( pItem->fg.isCorrelated==0 && nRef<=0 );
        pItem->fg.isCorrelated = (nRef!=0);
      }
    }

    /* Set up the local name-context to pass to sqlite3ResolveExprNames() to
    ** resolve the result-set expression list. */
    sNC.ncFlags = NC_AllowAgg|NC_AllowWin;
    sNC.pSrcList = p->pSrc;
    sNC.pNext = pOuterNC;

    /* Resolve names in the result set. */
    if( sqlite3ResolveExprListNames(&sNC, p->pEList) ) return WRC_Abort;
    sNC.ncFlags &= ~NC_AllowWin;

    /* If there are no aggregate functions in the result-set, and no GROUP BY
    ** expression, do not allow aggregates in any of the other expressions. */
    assert( (p->selFlags & SF_Aggregate)==0 );
    pGroupBy = p->pGroupBy;
    if( pGroupBy || (sNC.ncFlags & NC_HasAgg)!=0 ){
      assert( NC_MinMaxAgg==SF_MinMaxAgg );
      p->selFlags |= SF_Aggregate | (sNC.ncFlags&NC_MinMaxAgg);
    }else{
      sNC.ncFlags &= ~NC_AllowAgg;
    }

    /* If a HAVING clause is present, then there must be a GROUP BY clause.
    */
    if( p->pHaving && !pGroupBy ){
      sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING");
      return WRC_Abort;
    }

    /* Add the output column list to the name-context before parsing the
    ** other expressions in the SELECT statement. This is so that
    ** expressions in the WHERE clause (etc.) can refer to expressions by
    ** aliases in the result set.
    **
    ** Minor point: If this is the case, then the expression will be
    ** re-evaluated for each reference to it. */
    assert( (sNC.ncFlags & (NC_UAggInfo|NC_UUpsert))==0 );
    sNC.uNC.pEList = p->pEList;
    sNC.ncFlags |= NC_UEList;
    if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort;
    if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort;

    /* Resolve names in table-valued-function arguments */
    for(i=0; i<p->pSrc->nSrc; i++){
      struct SrcList_item *pItem = &p->pSrc->a[i];
      if( pItem->fg.isTabFunc
       && sqlite3ResolveExprListNames(&sNC, pItem->u1.pFuncArg)
      ){
        return WRC_Abort;
      }
    }

    /* The ORDER BY and GROUP BY clauses may not refer to terms in
    ** outer queries */
    sNC.pNext = 0;
    sNC.ncFlags |= NC_AllowAgg|NC_AllowWin;

    /* If this is a converted compound query, move the ORDER BY clause from
    ** the sub-query back to the parent query. At this point each term
    ** within the ORDER BY clause has been transformed to an integer value.
    ** These integers will be replaced by copies of the corresponding result
    ** set expressions by the call to resolveOrderGroupBy() below. */
    if( p->selFlags & SF_Converted ){
      Select *pSub = p->pSrc->a[0].pSelect;
      p->pOrderBy = pSub->pOrderBy;
      pSub->pOrderBy = 0;
    }

    /* Process the ORDER BY clause for singleton SELECT statements.
    ** The ORDER BY clause for compounds SELECT statements is handled
    ** below, after all of the result-sets for all of the elements of
    ** the compound have been resolved.
    **
    ** If there is an ORDER BY clause on a term of a compound-select other
    ** than the right-most term, then that is a syntax error.  But the error
    ** is not detected until much later, and so we need to go ahead and
    ** resolve those symbols on the incorrect ORDER BY for consistency. */
    if( isCompound<=nCompound     /* Defer right-most ORDER BY of a compound */
     && resolveOrderGroupBy(&sNC, p, p->pOrderBy, "ORDER")
    ){
      return WRC_Abort;
    }
    if( db->mallocFailed ){
      return WRC_Abort;
    }
    sNC.ncFlags &= ~NC_AllowWin;

    /* Resolve the GROUP BY clause.  At the same time, make sure
    ** the GROUP BY clause does not contain aggregate functions. */
    if( pGroupBy ){
      struct ExprList_item *pItem;

      if( resolveOrderGroupBy(&sNC, p, pGroupBy, "GROUP") || db->mallocFailed ){
        return WRC_Abort;
      }
      for(i=0, pItem=pGroupBy->a; i<pGroupBy->nExpr; i++, pItem++){
        if( ExprHasProperty(pItem->pExpr, EP_Agg) ){
          sqlite3ErrorMsg(pParse, "aggregate functions are not allowed in "
              "the GROUP BY clause");
          return WRC_Abort;
        }
      }
    }

#ifndef SQLITE_OMIT_WINDOWFUNC
    if( IN_RENAME_OBJECT ){
      Window *pWin;
      for(pWin=p->pWinDefn; pWin; pWin=pWin->pNextWin){
        if( sqlite3ResolveExprListNames(&sNC, pWin->pOrderBy)
         || sqlite3ResolveExprListNames(&sNC, pWin->pPartition)
        ){
          return WRC_Abort;
        }
      }
    }
#endif

    /* If this is part of a compound SELECT, check that it has the right
    ** number of expressions in the select list. */
    if( p->pNext && p->pEList->nExpr!=p->pNext->pEList->nExpr ){
      sqlite3SelectWrongNumTermsError(pParse, p->pNext);
      return WRC_Abort;
    }

    /* Advance to the next term of the compound */
    p = p->pPrior;
    nCompound++;
  }

  /* Resolve the ORDER BY on a compound SELECT after all terms of
  ** the compound have been resolved. */
  if( isCompound && resolveCompoundOrderBy(pParse, pLeftmost) ){
    return WRC_Abort;
  }

  return WRC_Prune;
}

/*
** This routine walks an expression tree and resolves references to
** table columns and result-set columns.  At the same time, do error
** checking on function usage and set a flag if any aggregate functions
** are seen.
**
** To resolve table columns references we look for nodes (or subtrees) of the
** form X.Y.Z or Y.Z or just Z where
**
**      X:   The name of a database.  Ex:  "main" or "temp" or
**           the symbolic name assigned to an ATTACH-ed database.
**
**      Y:   The name of a table in a FROM clause.  Or in a trigger
**           one of the special names "old" or "new".
**
**      Z:   The name of a column in table Y.
**
** The node at the root of the subtree is modified as follows:
**
**    Expr.op        Changed to TK_COLUMN
**    Expr.pTab      Points to the Table object for X.Y
**    Expr.iColumn   The column index in X.Y.  -1 for the rowid.
**    Expr.iTable    The VDBE cursor number for X.Y
**
**
** To resolve result-set references, look for expression nodes of the
** form Z (with no X and Y prefix) where the Z matches the right-hand
** size of an AS clause in the result-set of a SELECT.  The Z expression
** is replaced by a copy of the left-hand side of the result-set expression.
** Table-name and function resolution occurs on the substituted expression
** tree.  For example, in:
**
**      SELECT a+b AS x, c+d AS y FROM t1 ORDER BY x;
**
** The "x" term of the order by is replaced by "a+b" to render:
**
**      SELECT a+b AS x, c+d AS y FROM t1 ORDER BY a+b;
**
** Function calls are checked to make sure that the function is
** defined and that the correct number of arguments are specified.
** If the function is an aggregate function, then the NC_HasAgg flag is
** set and the opcode is changed from TK_FUNCTION to TK_AGG_FUNCTION.
** If an expression contains aggregate functions then the EP_Agg
** property on the expression is set.
**
** An error message is left in pParse if anything is amiss.  The number
** if errors is returned.
*/
int sqlite3ResolveExprNames(
  NameContext *pNC,       /* Namespace to resolve expressions in. */
  Expr *pExpr             /* The expression to be analyzed. */
){
  int savedHasAgg;
  Walker w;

  if( pExpr==0 ) return SQLITE_OK;
  /* Clear the has-aggregate/has-window bits so they reflect only this
  ** expression; the caller's bits are restored (OR-ed back) at the end */
  savedHasAgg = pNC->ncFlags & (NC_HasAgg|NC_MinMaxAgg|NC_HasWin);
  pNC->ncFlags &= ~(NC_HasAgg|NC_MinMaxAgg|NC_HasWin);
  w.pParse = pNC->pParse;
  w.xExprCallback = resolveExprStep;
  w.xSelectCallback = resolveSelectStep;
  w.xSelectCallback2 = 0;
  w.u.pNC = pNC;
#if SQLITE_MAX_EXPR_DEPTH>0
  w.pParse->nHeight += pExpr->nHeight;
  if( sqlite3ExprCheckHeight(w.pParse, w.pParse->nHeight) ){
    /* NOTE(review): this early return leaves pParse->nHeight incremented
    ** and ncFlags cleared - presumably acceptable because the parse is
    ** being aborted with an error; confirm against upstream. */
    return SQLITE_ERROR;
  }
#endif
  sqlite3WalkExpr(&w, pExpr);
#if SQLITE_MAX_EXPR_DEPTH>0
  w.pParse->nHeight -= pExpr->nHeight;
#endif
  assert( EP_Agg==NC_HasAgg );
  assert( EP_Win==NC_HasWin );
  testcase( pNC->ncFlags & NC_HasAgg );
  testcase( pNC->ncFlags & NC_HasWin );
  /* Propagate aggregate/window discovery onto the expression itself */
  ExprSetProperty(pExpr, pNC->ncFlags & (NC_HasAgg|NC_HasWin) );
  pNC->ncFlags |= savedHasAgg;
  return pNC->nErr>0 || w.pParse->nErr>0;
}

/*
** Resolve all names for all expression in an expression list.  This is
** just like sqlite3ResolveExprNames() except that it works for an expression
** list rather than a single expression.
*/
int sqlite3ResolveExprListNames(
  NameContext *pNC,       /* Namespace to resolve expressions in. */
  ExprList *pList         /* The expression list to be analyzed. */
){
  int i;
  if( pList ){
    for(i=0; i<pList->nExpr; i++){
      if( sqlite3ResolveExprNames(pNC, pList->a[i].pExpr) ) return WRC_Abort;
    }
  }
  return WRC_Continue;
}

/*
** Resolve all names in all expressions of a SELECT and in all
** decendents of the SELECT, including compounds off of p->pPrior,
** subqueries in expressions, and subqueries used as FROM clause
** terms.
**
** See sqlite3ResolveExprNames() for a description of the kinds of
** transformations that occur.
**
** All SELECT statements should have been expanded using
** sqlite3SelectExpand() prior to invoking this routine.
*/
void sqlite3ResolveSelectNames(
  Parse *pParse,         /* The parser context */
  Select *p,             /* The SELECT statement being coded. */
  NameContext *pOuterNC  /* Name context for parent SELECT statement */
){
  Walker w;

  assert( p!=0 );
  w.xExprCallback = resolveExprStep;
  w.xSelectCallback = resolveSelectStep;
  w.xSelectCallback2 = 0;
  w.pParse = pParse;
  w.u.pNC = pOuterNC;
  sqlite3WalkSelect(&w, p);
}

/*
** Resolve names in expressions that can only reference a single table
** or which cannot reference any tables at all.  Examples:
**
**                                                    "type" flag
**                                                    ------------
**    (1)   CHECK constraints                         NC_IsCheck
**    (2)   WHERE clauses on partial indices          NC_PartIdx
**    (3)   Expressions in indexes on expressions     NC_IdxExpr
**    (4)   Expression arguments to VACUUM INTO.      0
**    (5)   GENERATED ALWAYS as expressions           NC_GenCol
**
** In all cases except (4), the Expr.iTable value for Expr.op==TK_COLUMN
** nodes of the expression is set to -1 and the Expr.iColumn value is
** set to the column number.  In case (4), TK_COLUMN nodes cause an error.
**
** Any errors cause an error message to be set in pParse.
*/
int sqlite3ResolveSelfReference(
  Parse *pParse,      /* Parsing context */
  Table *pTab,        /* The table being referenced, or NULL */
  int type,           /* NC_IsCheck, NC_PartIdx, NC_IdxExpr, NC_GenCol, or 0 */
  Expr *pExpr,        /* Expression to resolve.  May be NULL. */
  ExprList *pList     /* Expression list to resolve.  May be NULL. */
){
  SrcList sSrc;                   /* Fake SrcList for pParse->pNewTable */
  NameContext sNC;                /* Name context for pParse->pNewTable */
  int rc;

  assert( type==0 || pTab!=0 );
  assert( type==NC_IsCheck || type==NC_PartIdx || type==NC_IdxExpr
          || type==NC_GenCol || pTab==0 );
  memset(&sNC, 0, sizeof(sNC));
  memset(&sSrc, 0, sizeof(sSrc));
  if( pTab ){
    /* A one-entry fake FROM clause naming only pTab; iCursor of -1 marks
    ** the columns as self-references rather than VDBE cursor reads */
    sSrc.nSrc = 1;
    sSrc.a[0].zName = pTab->zName;
    sSrc.a[0].pTab = pTab;
    sSrc.a[0].iCursor = -1;
  }
  sNC.pParse = pParse;
  sNC.pSrcList = &sSrc;
  sNC.ncFlags = type | NC_IsDDL;
  if( (rc = sqlite3ResolveExprNames(&sNC, pExpr))!=SQLITE_OK ) return rc;
  if( pList ) rc = sqlite3ResolveExprListNames(&sNC, pList);
  return rc;
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_1284_2
crossvul-cpp_data_good_3509_0
/*
 *   fs/cifs/connect.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2009
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/net.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/pagevec.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/inet.h>
#include <net/ipv6.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
#include "fscache.h"

#define CIFS_PORT 445
#define RFC1001_PORT 139

/* SMB echo "timeout" -- FIXME: tunable? */
#define SMB_ECHO_INTERVAL (60 * HZ)

extern mempool_t *cifs_req_poolp;

/* Parsed mount options, filled in from the mount string before a
 * connection is established. */
struct smb_vol {
	char *username;
	char *password;
	char *domainname;
	char *UNC;
	char *UNCip;
	char *iocharset;  /* local code page for mapping to and from Unicode */
	char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
	char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
	uid_t cred_uid;
	uid_t linux_uid;
	gid_t linux_gid;
	mode_t file_mode;
	mode_t dir_mode;
	unsigned secFlg;
	bool retry:1;
	bool intr:1;
	bool setuids:1;
	bool override_uid:1;
	bool override_gid:1;
	bool dynperm:1;
	bool noperm:1;
	bool no_psx_acl:1;   /* set if posix acl support should be disabled */
	bool cifs_acl:1;
	bool no_xattr:1;     /* set if xattr (EA) support should be disabled*/
	bool server_ino:1;   /* use inode numbers from server ie UniqueId */
	bool direct_io:1;
	bool strict_io:1;    /* strict cache behavior */
	bool remap:1;        /* set to remap seven reserved chars in filenames */
	bool posix_paths:1;  /* unset to not ask for posix pathnames. */
	bool no_linux_ext:1;
	bool sfu_emul:1;
	bool nullauth:1;     /* attempt to authenticate with null user */
	bool nocase:1;       /* request case insensitive filenames */
	bool nobrl:1;        /* disable sending byte range locks to srv */
	bool mand_lock:1;    /* send mandatory not posix byte range lock reqs */
	bool seal:1;         /* request transport encryption on share */
	bool nodfs:1;        /* Do not request DFS, even if available */
	bool local_lease:1;  /* check leases only on local system, not remote */
	bool noblocksnd:1;
	bool noautotune:1;
	bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
	bool fsc:1;          /* enable fscache */
	bool mfsymlinks:1;   /* use Minshall+French Symlinks */
	bool multiuser:1;
	unsigned int rsize;
	unsigned int wsize;
	bool sockopt_tcp_nodelay:1;
	unsigned short int port;
	unsigned long actimeo; /* attribute cache timeout (jiffies) */
	char *prepath;
	struct sockaddr_storage srcaddr; /* allow binding to a local IP */
	struct nls_table *local_nls;
};

/* FIXME: should these be tunable? */
#define TLINK_ERROR_EXPIRE	(1 * HZ)
#define TLINK_IDLE_EXPIRE	(600 * HZ)

static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);

/*
 * cifs tcp session reconnection
 *
 * mark tcp session as reconnecting so temporarily locked
 * mark all smb sessions as reconnecting for tcp session
 * reconnect tcp session
 * wake up waiters on reconnection?
- (not needed currently)
 */
static int
cifs_reconnect(struct TCP_Server_Info *server)
{
	int rc = 0;
	struct list_head *tmp, *tmp2;
	struct cifsSesInfo *ses;
	struct cifsTconInfo *tcon;
	struct mid_q_entry *mid_entry;

	spin_lock(&GlobalMid_Lock);
	if (server->tcpStatus == CifsExiting) {
		/* the demux thread will exit normally
		next time through the loop */
		spin_unlock(&GlobalMid_Lock);
		return rc;
	} else
		server->tcpStatus = CifsNeedReconnect;
	spin_unlock(&GlobalMid_Lock);
	server->maxBuf = 0;

	cFYI(1, "Reconnecting tcp session");

	/* before reconnecting the tcp session, mark the smb session (uid)
		and the tid bad so they are not used until reconnected */
	cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &server->smb_ses_list) {
		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
		ses->need_reconnect = true;
		ses->ipc_tid = 0;
		list_for_each(tmp2, &ses->tcon_list) {
			tcon = list_entry(tmp2, struct cifsTconInfo, tcon_list);
			tcon->need_reconnect = true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/* do not want to be sending data on a socket we are freeing */
	cFYI(1, "%s: tearing down socket", __func__);
	mutex_lock(&server->srv_mutex);
	if (server->ssocket) {
		cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state,
			server->ssocket->flags);
		kernel_sock_shutdown(server->ssocket, SHUT_WR);
		cFYI(1, "Post shutdown state: 0x%x Flags: 0x%lx",
			server->ssocket->state,
			server->ssocket->flags);
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}
	/* discard per-connection crypto/sequencing state; it is rebuilt
	 * during session re-establishment */
	server->sequence_number = 0;
	server->session_estab = false;
	kfree(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	server->lstrp = jiffies;
	mutex_unlock(&server->srv_mutex);

	/* mark submitted MIDs for retry and issue callback */
	cFYI(1, "%s: issuing mid callbacks", __func__);
	spin_lock(&GlobalMid_Lock);
	list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
		if (mid_entry->midState == MID_REQUEST_SUBMITTED)
			mid_entry->midState = MID_RETRY_NEEDED;
		list_del_init(&mid_entry->qhead);
		mid_entry->callback(mid_entry);
	}
	spin_unlock(&GlobalMid_Lock);

	/* loop (with backoff) until the socket comes back or we are told
	 * to exit */
	while ((server->tcpStatus != CifsExiting) &&
	       (server->tcpStatus != CifsGood)) {
		try_to_freeze();

		/* we should try only the port we connected to before */
		rc = generic_ip_connect(server);
		if (rc) {
			cFYI(1, "reconnect error %d", rc);
			msleep(3000);
		} else {
			atomic_inc(&tcpSesReconnectCount);
			spin_lock(&GlobalMid_Lock);
			if (server->tcpStatus != CifsExiting)
				server->tcpStatus = CifsGood;
			spin_unlock(&GlobalMid_Lock);
		}
	}

	return rc;
}

/*
	return codes:
		0	not a transact2, or all data present
		>0	transact2 with that much data missing
		-EINVAL = invalid transact2
 */
static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
{
	struct smb_t2_rsp *pSMBt;
	int remaining;
	__u16 total_data_size, data_in_this_rsp;

	if (pSMB->Command != SMB_COM_TRANSACTION2)
		return 0;

	/* check for plausible wct, bcc and t2 data and parm sizes */
	/* check for parm and data offset going beyond end of smb */
	if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
		cFYI(1, "invalid transact2 word count");
		return -EINVAL;
	}

	pSMBt = (struct smb_t2_rsp *)pSMB;

	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
	data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);

	remaining = total_data_size - data_in_this_rsp;

	if (remaining == 0)
		return 0;
	else if (remaining < 0) {
		cFYI(1, "total data %d smaller than data in frame %d",
			total_data_size, data_in_this_rsp);
		return -EINVAL;
	} else {
		cFYI(1, "missing %d bytes from transact2, check next response",
			remaining);
		/* reject responses that claim more data than a server
		 * buffer can ever legitimately hold */
		if (total_data_size > maxBufSize) {
			cERROR(1, "TotalDataSize %d is over maximum buffer %d",
				total_data_size, maxBufSize);
			return -EINVAL;
		}
		return remaining;
	}
}

/* Append the data payload of a secondary transact2 response (psecond)
 * onto the data area of the primary response (pTargetSMB). */
static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
{
	struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
	struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)pTargetSMB;
char *data_area_of_target; char *data_area_of_buf2; int remaining; __u16 byte_count, total_data_size, total_in_buf, total_in_buf2; total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); if (total_data_size != get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount)) cFYI(1, "total data size of primary and secondary t2 differ"); total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); remaining = total_data_size - total_in_buf; if (remaining < 0) return -EINVAL; if (remaining == 0) /* nothing to do, ignore */ return 0; total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount); if (remaining < total_in_buf2) { cFYI(1, "transact2 2nd response contains too much data"); } /* find end of first SMB data area */ data_area_of_target = (char *)&pSMBt->hdr.Protocol + get_unaligned_le16(&pSMBt->t2_rsp.DataOffset); /* validate target area */ data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol + get_unaligned_le16(&pSMB2->t2_rsp.DataOffset); data_area_of_target += total_in_buf; /* copy second buffer into end of first buffer */ memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); total_in_buf += total_in_buf2; put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount); byte_count = get_bcc_le(pTargetSMB); byte_count += total_in_buf2; put_bcc_le(byte_count, pTargetSMB); byte_count = pTargetSMB->smb_buf_length; byte_count += total_in_buf2; /* BB also add check that we are not beyond maximum buffer size */ pTargetSMB->smb_buf_length = byte_count; if (remaining == total_in_buf2) { cFYI(1, "found the last secondary response"); return 0; /* we are done */ } else /* more responses to go */ return 1; } static void cifs_echo_request(struct work_struct *work) { int rc; struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, echo.work); /* * We cannot send an echo until the NEGOTIATE_PROTOCOL request is * done, which is indicated by maxBuf != 0. 
Also, no need to ping if
 * we got a response recently */
	if (server->maxBuf == 0 ||
	    time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
		goto requeue_echo;

	rc = CIFSSMBEcho(server);
	if (rc)
		cFYI(1, "Unable to send echo request to server: %s",
			server->hostname);

requeue_echo:
	queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
}

/*
 * cifs_demultiplex_thread - per-server receive thread (cifsd)
 *
 * Reads the 4-byte RFC1001 header, classifies the frame (keep-alive,
 * session responses, or SMB), reads the rest of the PDU into either a
 * small or large response buffer, then matches it against the pending
 * mid queue and hands it to the waiting thread via the mid callback.
 * Multi-part transact2 responses are merged with coalesce_t2() before
 * the mid is completed.  On socket trouble it calls cifs_reconnect()
 * and keeps looping until tcpStatus becomes CifsExiting, then drains
 * and frees everything, including the server structure itself.
 */
static int
cifs_demultiplex_thread(struct TCP_Server_Info *server)
{
	int length;
	unsigned int pdu_length, total_read;
	struct smb_hdr *smb_buffer = NULL;
	struct smb_hdr *bigbuf = NULL;
	struct smb_hdr *smallbuf = NULL;
	struct msghdr smb_msg;
	struct kvec iov;
	struct socket *csocket = server->ssocket;
	struct list_head *tmp, *tmp2;
	struct task_struct *task_to_wake = NULL;
	struct mid_q_entry *mid_entry;
	char temp;
	bool isLargeBuf = false;
	bool isMultiRsp;
	int reconnect;

	current->flags |= PF_MEMALLOC;
	cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));

	length = atomic_inc_return(&tcpSesAllocCount);
	if (length > 1)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
				GFP_KERNEL);

	set_freezable();
	while (server->tcpStatus != CifsExiting) {
		if (try_to_freeze())
			continue;
		if (bigbuf == NULL) {
			bigbuf = cifs_buf_get();
			if (!bigbuf) {
				cERROR(1, "No memory for large SMB response");
				msleep(3000);
				/* retry will check if exiting */
				continue;
			}
		} else if (isLargeBuf) {
			/* we are reusing a dirty large buf, clear its start */
			memset(bigbuf, 0, sizeof(struct smb_hdr));
		}

		if (smallbuf == NULL) {
			smallbuf = cifs_small_buf_get();
			if (!smallbuf) {
				cERROR(1, "No memory for SMB response");
				msleep(1000);
				/* retry will check if exiting */
				continue;
			}
			/* beginning of smb buffer is cleared in our buf_get */
		} else /* if existing small buf clear beginning */
			memset(smallbuf, 0, sizeof(struct smb_hdr));

		isLargeBuf = false;
		isMultiRsp = false;
		smb_buffer = smallbuf;
		iov.iov_base = smb_buffer;
		iov.iov_len = 4;
		smb_msg.msg_control = NULL;
		smb_msg.msg_controllen = 0;
		pdu_length = 4; /* enough to get RFC1001 header */

incomplete_rcv:
		/* server silent past the echo timeout? force a reconnect */
		if (echo_retries > 0 &&
		    time_after(jiffies, server->lstrp +
					(echo_retries * SMB_ECHO_INTERVAL))) {
			cERROR(1, "Server %s has not responded in %d seconds. "
				  "Reconnecting...", server->hostname,
				  (echo_retries * SMB_ECHO_INTERVAL / HZ));
			cifs_reconnect(server);
			csocket = server->ssocket;
			wake_up(&server->response_q);
			continue;
		}

		length =
		    kernel_recvmsg(csocket, &smb_msg,
				&iov, 1, pdu_length, 0 /* BB other flags? */);

		if (server->tcpStatus == CifsExiting) {
			break;
		} else if (server->tcpStatus == CifsNeedReconnect) {
			cFYI(1, "Reconnect after server stopped responding");
			cifs_reconnect(server);
			cFYI(1, "call to reconnect done");
			csocket = server->ssocket;
			continue;
		} else if (length == -ERESTARTSYS ||
			   length == -EAGAIN ||
			   length == -EINTR) {
			msleep(1); /* minimum sleep to prevent looping
				allowing socket to clear and app threads to set
				tcpStatus CifsNeedReconnect if server hung */
			if (pdu_length < 4) {
				iov.iov_base = (4 - pdu_length) +
							(char *)smb_buffer;
				iov.iov_len = pdu_length;
				smb_msg.msg_control = NULL;
				smb_msg.msg_controllen = 0;
				goto incomplete_rcv;
			} else
				continue;
		} else if (length <= 0) {
			cFYI(1, "Reconnect after unexpected peek error %d",
				length);
			cifs_reconnect(server);
			csocket = server->ssocket;
			wake_up(&server->response_q);
			continue;
		} else if (length < pdu_length) {
			cFYI(1, "requested %d bytes but only got %d bytes",
				  pdu_length, length);
			pdu_length -= length;
			msleep(1);
			goto incomplete_rcv;
		}

		/* The right amount was read from socket - 4 bytes */
		/* so we can now interpret the length field */

		/* the first byte big endian of the length field,
		is actually not part of the length but the type
		with the most common, zero, as regular data */
		temp = *((char *) smb_buffer);

		/* Note that FC 1001 length is big endian on the wire,
		but we convert it here so it is always manipulated
		as host byte order */
		pdu_length = be32_to_cpu((__force __be32)smb_buffer->smb_buf_length);
		smb_buffer->smb_buf_length = pdu_length;

		cFYI(1, "rfc1002 length 0x%x", pdu_length+4);

		if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
			continue;
		} else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
			cFYI(1, "Good RFC 1002 session rsp");
			continue;
		} else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
			/* we get this from Windows 98 instead of
			   an error on SMB negprot response */
			cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
				pdu_length);
			/* give server a second to clean up  */
			msleep(1000);
			/* always try 445 first on reconnect since we get NACK
			 * on some if we ever connected to port 139 (the NACK
			 * is since we do not begin with RFC1001 session
			 * initialize frame)
			 */
			cifs_set_port((struct sockaddr *)
					&server->dstaddr, CIFS_PORT);
			cifs_reconnect(server);
			csocket = server->ssocket;
			wake_up(&server->response_q);
			continue;
		} else if (temp != (char) 0) {
			cERROR(1, "Unknown RFC 1002 frame");
			cifs_dump_mem(" Received Data: ", (char *)smb_buffer,
				      length);
			cifs_reconnect(server);
			csocket = server->ssocket;
			continue;
		}

		/* else we have an SMB response */
		if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
			    (pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
			cERROR(1, "Invalid size SMB length %d pdu_length %d",
					length, pdu_length+4);
			cifs_reconnect(server);
			csocket = server->ssocket;
			wake_up(&server->response_q);
			continue;
		}

		/* else length ok */
		reconnect = 0;

		if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
			isLargeBuf = true;
			/* carry the already-read RFC1001 header over */
			memcpy(bigbuf, smallbuf, 4);
			smb_buffer = bigbuf;
		}
		length = 0;
		iov.iov_base = 4 + (char *)smb_buffer;
		iov.iov_len = pdu_length;
		for (total_read = 0; total_read < pdu_length;
		     total_read += length) {
			length = kernel_recvmsg(csocket, &smb_msg, &iov, 1,
						pdu_length - total_read, 0);
			if (server->tcpStatus == CifsExiting) {
				/* then will exit */
				reconnect = 2;
				break;
			} else if (server->tcpStatus == CifsNeedReconnect) {
				cifs_reconnect(server);
				csocket = server->ssocket;
				/* Reconnect wakes up rspns q */
				/* Now we will reread sock */
				reconnect = 1;
				break;
			} else if (length == -ERESTARTSYS ||
				   length == -EAGAIN ||
				   length == -EINTR) {
				msleep(1); /* minimum sleep to prevent looping,
					      allowing socket to clear and app
					      threads to set tcpStatus
					      CifsNeedReconnect if server hung*/
				length = 0;
				continue;
			} else if (length <= 0) {
				cERROR(1, "Received no data, expecting %d",
					      pdu_length - total_read);
				cifs_reconnect(server);
				csocket = server->ssocket;
				reconnect = 1;
				break;
			}
		}
		if (reconnect == 2)
			break;
		else if (reconnect == 1)
			continue;

		total_read += 4; /* account for rfc1002 hdr */

		dump_smb(smb_buffer, total_read);

		/*
		 * We know that we received enough to get to the MID as we
		 * checked the pdu_length earlier. Now check to see
		 * if the rest of the header is OK. We borrow the length
		 * var for the rest of the loop to avoid a new stack var.
		 *
		 * 48 bytes is enough to display the header and a little bit
		 * into the payload for debugging purposes.
		 */
		length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
		if (length != 0)
			cifs_dump_mem("Bad SMB: ", smb_buffer,
					min_t(unsigned int, total_read, 48));

		mid_entry = NULL;
		server->lstrp = jiffies;

		spin_lock(&GlobalMid_Lock);
		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);

			if ((mid_entry->mid == smb_buffer->Mid) &&
			    (mid_entry->midState == MID_REQUEST_SUBMITTED) &&
			    (mid_entry->command == smb_buffer->Command)) {
				if (length == 0 &&
				   check2ndT2(smb_buffer, server->maxBuf) > 0) {
					/* We have a multipart transact2 resp */
					isMultiRsp = true;
					if (mid_entry->resp_buf) {
						/* merge response - fix up 1st*/
						if (coalesce_t2(smb_buffer,
							mid_entry->resp_buf)) {
							mid_entry->multiRsp =
								 true;
							break;
						} else {
							/* all parts received */
							mid_entry->multiEnd =
								 true;
							goto multi_t2_fnd;
						}
					} else {
						if (!isLargeBuf) {
							cERROR(1, "1st trans2 resp needs bigbuf");
					/* BB maybe we can fix this up,  switch
					   to already allocated large buffer? */
						} else {
							/* Have first buffer */
							mid_entry->resp_buf =
								 smb_buffer;
							mid_entry->largeBuf =
								 true;
							bigbuf = NULL;
						}
					}
					break;
				}
				mid_entry->resp_buf = smb_buffer;
				mid_entry->largeBuf = isLargeBuf;
multi_t2_fnd:
				if (length == 0)
					mid_entry->midState =
							MID_RESPONSE_RECEIVED;
				else
					mid_entry->midState =
							MID_RESPONSE_MALFORMED;
#ifdef CONFIG_CIFS_STATS2
				mid_entry->when_received = jiffies;
#endif
				list_del_init(&mid_entry->qhead);
				mid_entry->callback(mid_entry);
				break;
			}
			mid_entry = NULL;
		}
		spin_unlock(&GlobalMid_Lock);
		if (mid_entry != NULL) {
			/* Was previous buf put in mpx struct for multi-rsp? */
			if (!isMultiRsp) {
				/* smb buffer will be freed by user thread */
				if (isLargeBuf)
					bigbuf = NULL;
				else
					smallbuf = NULL;
			}
		} else if (length != 0) {
			/* response sanity checks failed */
			continue;
		} else if (!is_valid_oplock_break(smb_buffer, server) &&
			   !isMultiRsp) {
			cERROR(1, "No task to wake, unknown frame received! "
				   "NumMids %d", atomic_read(&midCount));
			cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
				      sizeof(struct smb_hdr));
#ifdef CONFIG_CIFS_DEBUG2
			cifs_dump_detail(smb_buffer);
			cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */

		}
	} /* end while !EXITING */

	/* take it off the list, if it's not already */
	spin_lock(&cifs_tcp_ses_lock);
	list_del_init(&server->tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	spin_lock(&GlobalMid_Lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&GlobalMid_Lock);
	wake_up_all(&server->response_q);

	/* check if we have blocked requests that need to free */
	/* Note that cifs_max_pending is normally 50, but
	can be set at module install time to as little as two */
	spin_lock(&GlobalMid_Lock);
	if (atomic_read(&server->inFlight) >= cifs_max_pending)
		atomic_set(&server->inFlight, cifs_max_pending - 1);
	/* We do not want to set the max_pending too low or we
	could end up with the counter going negative */
	spin_unlock(&GlobalMid_Lock);
	/* Although there should not be any requests blocked on
	this queue it can not hurt to be paranoid and try to wake up requests
	that may haven been blocked when more than 50 at time were on the wire
	to the same server - they now will see the session is in exit state
	and get out of SendReceive.  */
	wake_up_all(&server->request_q);
	/* give those requests time to exit */
	msleep(125);

	if (server->ssocket) {
		sock_release(csocket);
		server->ssocket = NULL;
	}
	/* buffer usuallly freed in free_mid - need to free it here on exit */
	cifs_buf_release(bigbuf);
	if (smallbuf) /* no sense logging a debug message if NULL */
		cifs_small_buf_release(smallbuf);

	if (!list_empty(&server->pending_mid_q)) {
		spin_lock(&GlobalMid_Lock);
		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cFYI(1, "Clearing Mid 0x%x - issuing callback",
					 mid_entry->mid);
			list_del_init(&mid_entry->qhead);
			mid_entry->callback(mid_entry);
		}
		spin_unlock(&GlobalMid_Lock);
		/* 1/8th of sec is more than enough time for them to exit */
		msleep(125);
	}

	if (!list_empty(&server->pending_mid_q)) {
		/* mpx threads have not exited yet give them
		at least the smb send timeout time for long ops */
		/* due to delays on oplock break requests, we need
		to wait at least 45 seconds before giving up
		on a request getting a response and going ahead
		and killing cifsd */
		cFYI(1, "Wait for exit from demultiplex thread");
		msleep(46000);
		/* if threads still have not exited they are probably never
		coming home not much else we can do but free the memory */
	}

	kfree(server->hostname);
	task_to_wake = xchg(&server->tsk, NULL);
	kfree(server);

	length = atomic_dec_return(&tcpSesAllocCount);
	if (length > 0)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv,
				GFP_KERNEL);

	/* if server->tsk was NULL then wait for a signal before exiting */
	if (!task_to_wake) {
		set_current_state(TASK_INTERRUPTIBLE);
		while (!signal_pending(current)) {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		set_current_state(TASK_RUNNING);
	}

	module_put_and_exit(0);
}

/* extract the host portion of the UNC string */
static char *
extract_hostname(const char *unc)
{
	const char *src;
	char *dst, *delim;
	unsigned int len;

	/* skip double chars at beginning of string */
	/* BB: check validity of these bytes? */
	src = unc + 2;

	/* delimiter between hostname and sharename is always '\\' now */
	delim = strchr(src, '\\');
	if (!delim)
		return ERR_PTR(-EINVAL);

	/* returns a kmalloc'd NUL-terminated copy; caller must kfree it,
	   and must check with IS_ERR() first */
	len = delim - src;
	dst = kmalloc((len + 1), GFP_KERNEL);
	if (dst == NULL)
		return ERR_PTR(-ENOMEM);

	memcpy(dst, src, len);
	dst[len] = '\0';

	return dst;
}

/*
 * cifs_parse_mount_options - parse the comma-separated mount option
 * string into @vol.
 *
 * @options: mutable copy of the option string (parsed in place with
 *	     strsep; several vol fields, e.g. username/domainname/UNCip,
 *	     end up pointing INTO this buffer, so it must outlive @vol)
 * @devname: the mount source (e.g. //server/share), used for the UNC
 *	     when no unc= option was given
 * @vol:     output volume parameters
 *
 * Returns 0 on success, 1 on any parse error (note: NOT a -errno).
 */
static int
cifs_parse_mount_options(char *options, const char *devname,
			 struct smb_vol *vol)
{
	char *value;
	char *data;
	unsigned int  temp_len, i, j;
	char separator[2];
	short int override_uid = -1;
	short int override_gid = -1;
	bool uid_specified = false;
	bool gid_specified = false;
	char *nodename = utsname()->nodename;

	separator[0] = ',';
	separator[1] = 0;

	/*
	 * does not have to be perfect mapping since field is
	 * informational, only used for servers that do not support
	 * port 445 and it can be overridden at mount time
	 */
	memset(vol->source_rfc1001_name, 0x20, RFC1001_NAME_LEN);
	for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
		vol->source_rfc1001_name[i] = toupper(nodename[i]);

	vol->source_rfc1001_name[RFC1001_NAME_LEN] = 0;
	/* null target name indicates to use *SMBSERVR default called name
	   if we end up sending RFC1001 session initialize */
	vol->target_rfc1001_name[0] = 0;
	vol->cred_uid = current_uid();
	vol->linux_uid = current_uid();
	vol->linux_gid = current_gid();

	/* default to only allowing write access to owner of the mount */
	vol->dir_mode = vol->file_mode = S_IRUGO | S_IXUGO | S_IWUSR;

	/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
	/* default is always to request posix paths. */
	vol->posix_paths = 1;
	/* default to using server inode numbers where available */
	vol->server_ino = 1;

	vol->actimeo = CIFS_DEF_ACTIMEO;

	if (!options)
		return 1;

	/* an alternate separator may be requested via a leading sep= */
	if (strncmp(options, "sep=", 4) == 0) {
		if (options[4] != 0) {
			separator[0] = options[4];
			options += 5;
		} else {
			cFYI(1, "Null separator not allowed");
		}
	}

	while ((data = strsep(&options, separator)) != NULL) {
		if (!*data)
			continue;
		if ((value = strchr(data, '=')) != NULL)
			*value++ = '\0';

		/* Have to parse this before we parse for "user" */
		if (strnicmp(data, "user_xattr", 10) == 0) {
			vol->no_xattr = 0;
		} else if (strnicmp(data, "nouser_xattr", 12) == 0) {
			vol->no_xattr = 1;
		} else if (strnicmp(data, "user", 4) == 0) {
			if (!value) {
				printk(KERN_WARNING
				       "CIFS: invalid or missing username\n");
				return 1;	/* needs_arg; */
			} else if (!*value) {
				/* null user, ie anonymous, authentication */
				vol->nullauth = 1;
			}
			if (strnlen(value, MAX_USERNAME_SIZE) <
						MAX_USERNAME_SIZE) {
				vol->username = value;
			} else {
				printk(KERN_WARNING "CIFS: username too long\n");
				return 1;
			}
		} else if (strnicmp(data, "pass", 4) == 0) {
			if (!value) {
				vol->password = NULL;
				continue;
			} else if (value[0] == 0) {
				/* check if string begins with double comma
				   since that would mean the password really
				   does start with a comma, and would not
				   indicate an empty string */
				if (value[1] != separator[0]) {
					vol->password = NULL;
					continue;
				}
			}
			temp_len = strlen(value);
			/* removed password length check, NTLM passwords
				can be arbitrarily long */

			/* if comma in password, the string will be
			prematurely null terminated.  Commas in password are
			specified across the cifs mount interface by a double
			comma ie ,, and a comma used as in other cases ie ','
			as a parameter delimiter/separator is single and due
			to the strsep above is temporarily zeroed. */

			/* NB: password legally can have multiple commas and
			the only illegal character in a password is null */

			if ((value[temp_len] == 0) &&
			    (value[temp_len+1] == separator[0])) {
				/* reinsert comma */
				value[temp_len] = separator[0];
				temp_len += 2;  /* move after second comma */
				while (value[temp_len] != 0)  {
					if (value[temp_len] == separator[0]) {
						if (value[temp_len+1] ==
						     separator[0]) {
						/* skip second comma */
							temp_len++;
						} else {
						/* single comma indicating start
							 of next parm */
							break;
						}
					}
					temp_len++;
				}
				if (value[temp_len] == 0) {
					options = NULL;
				} else {
					value[temp_len] = 0;
					/* point option to start of next parm */
					options = value + temp_len + 1;
				}
				/* go from value to value + temp_len condensing
				double commas to singles. Note that this ends up
				allocating a few bytes too many, which is ok */
				vol->password = kzalloc(temp_len, GFP_KERNEL);
				if (vol->password == NULL) {
					printk(KERN_WARNING "CIFS: no memory "
							    "for password\n");
					return 1;
				}
				for (i = 0, j = 0; i < temp_len; i++, j++) {
					vol->password[j] = value[i];
					if (value[i] == separator[0]
						&& value[i+1] == separator[0]) {
						/* skip second comma */
						i++;
					}
				}
				vol->password[j] = 0;
			} else {
				vol->password = kzalloc(temp_len+1, GFP_KERNEL);
				if (vol->password == NULL) {
					printk(KERN_WARNING "CIFS: no memory "
							    "for password\n");
					return 1;
				}
				strcpy(vol->password, value);
			}
		} else if (!strnicmp(data, "ip", 2) ||
			   !strnicmp(data, "addr", 4)) {
			if (!value || !*value) {
				vol->UNCip = NULL;
			} else if (strnlen(value, INET6_ADDRSTRLEN) <
							INET6_ADDRSTRLEN) {
				vol->UNCip = value;
			} else {
				printk(KERN_WARNING "CIFS: ip address "
						    "too long\n");
				return 1;
			}
		} else if (strnicmp(data, "sec", 3) == 0) {
			if (!value || !*value) {
				cERROR(1, "no security value specified");
				continue;
			} else if (strnicmp(value, "krb5i", 5) == 0) {
				vol->secFlg |= CIFSSEC_MAY_KRB5 |
					CIFSSEC_MUST_SIGN;
			} else if (strnicmp(value, "krb5p", 5) == 0) {
				/* vol->secFlg |= CIFSSEC_MUST_SEAL |
					CIFSSEC_MAY_KRB5; */
				cERROR(1, "Krb5 cifs privacy not supported");
				return 1;
			} else if (strnicmp(value, "krb5", 4) == 0) {
				vol->secFlg |= CIFSSEC_MAY_KRB5;
			} else if (strnicmp(value, "ntlmsspi", 8) == 0) {
				vol->secFlg |= CIFSSEC_MAY_NTLMSSP |
					CIFSSEC_MUST_SIGN;
			} else if (strnicmp(value, "ntlmssp", 7) == 0) {
				vol->secFlg |= CIFSSEC_MAY_NTLMSSP;
			} else if (strnicmp(value, "ntlmv2i", 7) == 0) {
				vol->secFlg |= CIFSSEC_MAY_NTLMV2 |
					CIFSSEC_MUST_SIGN;
			} else if (strnicmp(value, "ntlmv2", 6) == 0) {
				vol->secFlg |= CIFSSEC_MAY_NTLMV2;
			} else if (strnicmp(value, "ntlmi", 5) == 0) {
				vol->secFlg |= CIFSSEC_MAY_NTLM |
					CIFSSEC_MUST_SIGN;
			} else if (strnicmp(value, "ntlm", 4) == 0) {
				/* ntlm is default so can be turned off too */
				vol->secFlg |= CIFSSEC_MAY_NTLM;
			} else if (strnicmp(value, "nontlm", 6) == 0) {
				/* BB is there a better way to do this? */
				vol->secFlg |= CIFSSEC_MAY_NTLMV2;
#ifdef CONFIG_CIFS_WEAK_PW_HASH
			} else if (strnicmp(value, "lanman", 6) == 0) {
				vol->secFlg |= CIFSSEC_MAY_LANMAN;
#endif
			} else if (strnicmp(value, "none", 4) == 0) {
				vol->nullauth = 1;
			} else {
				cERROR(1, "bad security option: %s", value);
				return 1;
			}
		} else if ((strnicmp(data, "unc", 3) == 0)
			   || (strnicmp(data, "target", 6) == 0)
			   || (strnicmp(data, "path", 4) == 0)) {
			if (!value || !*value) {
				printk(KERN_WARNING "CIFS: invalid path to "
						    "network resource\n");
				return 1;	/* needs_arg; */
			}
			if ((temp_len = strnlen(value, 300)) < 300) {
				vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
				if (vol->UNC == NULL)
					return 1;
				strcpy(vol->UNC, value);
				if (strncmp(vol->UNC, "//", 2) == 0) {
					vol->UNC[0] = '\\';
					vol->UNC[1] = '\\';
				} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
					printk(KERN_WARNING
					       "CIFS: UNC Path does not begin "
					       "with // or \\\\ \n");
					return 1;
				}
			} else {
				printk(KERN_WARNING "CIFS: UNC name too long\n");
				return 1;
			}
		} else if ((strnicmp(data, "domain", 3) == 0)
			   || (strnicmp(data, "workgroup", 5) == 0)) {
			if (!value || !*value) {
				printk(KERN_WARNING "CIFS: invalid domain name\n");
				return 1;	/* needs_arg; */
			}
			/* BB are there cases in which a comma can be valid in
			a domain name and need special handling? */
			if (strnlen(value, 256) < 256) {
				vol->domainname = value;
				cFYI(1, "Domain name set");
			} else {
				printk(KERN_WARNING "CIFS: domain name too "
						    "long\n");
				return 1;
			}
		} else if (strnicmp(data, "srcaddr", 7) == 0) {
			vol->srcaddr.ss_family = AF_UNSPEC;

			if (!value || !*value) {
				printk(KERN_WARNING "CIFS: srcaddr value"
				       " not specified.\n");
				return 1;	/* needs_arg; */
			}
			i = cifs_convert_address((struct sockaddr *)&vol->srcaddr,
						 value, strlen(value));
			if (i == 0) {
				printk(KERN_WARNING "CIFS:  Could not parse"
				       " srcaddr: %s\n",
				       value);
				return 1;
			}
		} else if (strnicmp(data, "prefixpath", 10) == 0) {
			if (!value || !*value) {
				printk(KERN_WARNING
					"CIFS: invalid path prefix\n");
				return 1;	/* needs_argument */
			}
			if ((temp_len = strnlen(value, 1024)) < 1024) {
				if (value[0] != '/')
					temp_len++;  /* missing leading slash */
				vol->prepath = kmalloc(temp_len+1, GFP_KERNEL);
				if (vol->prepath == NULL)
					return 1;
				if (value[0] != '/') {
					vol->prepath[0] = '/';
					strcpy(vol->prepath+1, value);
				} else
					strcpy(vol->prepath, value);
				cFYI(1, "prefix path %s", vol->prepath);
			} else {
				printk(KERN_WARNING "CIFS: prefix too long\n");
				return 1;
			}
		} else if (strnicmp(data, "iocharset", 9) == 0) {
			if (!value || !*value) {
				printk(KERN_WARNING "CIFS: invalid iocharset "
						    "specified\n");
				return 1;	/* needs_arg; */
			}
			if (strnlen(value, 65) < 65) {
				if (strnicmp(value, "default", 7))
					vol->iocharset = value;
				/* if iocharset not set then load_nls_default
				   is used by caller */
				cFYI(1, "iocharset set to %s", value);
			} else {
				printk(KERN_WARNING "CIFS: iocharset name "
						    "too long.\n");
				return 1;
			}
		} else if (!strnicmp(data, "uid", 3) && value && *value) {
			vol->linux_uid = simple_strtoul(value, &value, 0);
			uid_specified = true;
		} else if (!strnicmp(data, "cruid", 5) && value && *value) {
			vol->cred_uid = simple_strtoul(value, &value, 0);
		} else if (!strnicmp(data, "forceuid", 8)) {
			override_uid = 1;
		} else if (!strnicmp(data, "noforceuid", 10)) {
			override_uid = 0;
		} else if (!strnicmp(data, "gid", 3) && value && *value) {
			vol->linux_gid = simple_strtoul(value, &value, 0);
			gid_specified = true;
		} else if (!strnicmp(data, "forcegid", 8)) {
			override_gid = 1;
		} else if (!strnicmp(data, "noforcegid", 10)) {
			override_gid = 0;
		} else if (strnicmp(data, "file_mode", 4) == 0) {
			if (value && *value) {
				vol->file_mode =
					simple_strtoul(value, &value, 0);
			}
		} else if (strnicmp(data, "dir_mode", 4) == 0) {
			if (value && *value) {
				vol->dir_mode =
					simple_strtoul(value, &value, 0);
			}
		} else if (strnicmp(data, "dirmode", 4) == 0) {
			if (value && *value) {
				vol->dir_mode =
					simple_strtoul(value, &value, 0);
			}
		} else if (strnicmp(data, "port", 4) == 0) {
			if (value && *value) {
				vol->port =
					simple_strtoul(value, &value, 0);
			}
		} else if (strnicmp(data, "rsize", 5) == 0) {
			if (value && *value) {
				vol->rsize =
					simple_strtoul(value, &value, 0);
			}
		} else if (strnicmp(data, "wsize", 5) == 0) {
			if (value && *value) {
				vol->wsize =
					simple_strtoul(value, &value, 0);
			}
		} else if (strnicmp(data, "sockopt", 5) == 0) {
			if (!value || !*value) {
				cERROR(1, "no socket option specified");
				continue;
			} else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
				vol->sockopt_tcp_nodelay = 1;
			}
		} else if (strnicmp(data, "netbiosname", 4) == 0) {
			if (!value || !*value || (*value == ' ')) {
				cFYI(1, "invalid (empty) netbiosname");
			} else {
				memset(vol->source_rfc1001_name, 0x20,
					RFC1001_NAME_LEN);
				/*
				 * FIXME: are there cases in which a comma can
				 * be valid in workstation netbios name (and
				 * need special handling)?
				 */
				for (i = 0; i < RFC1001_NAME_LEN; i++) {
					/* don't ucase netbiosname for user */
					if (value[i] == 0)
						break;
					vol->source_rfc1001_name[i] = value[i];
				}
				/* The string has 16th byte zero still from
				   set at top of the function  */
				if (i == RFC1001_NAME_LEN && value[i] != 0)
					printk(KERN_WARNING "CIFS: netbiosname"
						" longer than 15 truncated.\n");
			}
		} else if (strnicmp(data, "servern", 7) == 0) {
			/* servernetbiosname specified override *SMBSERVER */
			if (!value || !*value || (*value == ' ')) {
				cFYI(1, "empty server netbiosname specified");
			} else {
				/* last byte, type, is 0x20 for servr type */
				memset(vol->target_rfc1001_name, 0x20,
					RFC1001_NAME_LEN_WITH_NULL);

				for (i = 0; i < 15; i++) {
				/* BB are there cases in which a comma can be
				   valid in this workstation netbios name
				   (and need special handling)? */

				/* user or mount helper must uppercase
				   the netbiosname */
					if (value[i] == 0)
						break;
					else
						vol->target_rfc1001_name[i] =
								value[i];
				}
				/* The string has 16th byte zero still from
				   set at top of the function  */
				if (i == RFC1001_NAME_LEN && value[i] != 0)
					printk(KERN_WARNING "CIFS: server net"
					"biosname longer than 15 truncated.\n");
			}
		} else if (strnicmp(data, "actimeo", 7) == 0) {
			if (value && *value) {
				vol->actimeo = HZ * simple_strtoul(value,
								   &value, 0);
				if (vol->actimeo > CIFS_MAX_ACTIMEO) {
					cERROR(1, "CIFS: attribute cache"
							"timeout too large");
					return 1;
				}
			}
		} else if (strnicmp(data, "credentials", 4) == 0) {
			/* ignore */
		} else if (strnicmp(data, "version", 3) == 0) {
			/* ignore */
		} else if (strnicmp(data, "guest", 5) == 0) {
			/* ignore */
		} else if (strnicmp(data, "rw", 2) == 0) {
			/* ignore */
		} else if (strnicmp(data, "ro", 2) == 0) {
			/* ignore */
		} else if (strnicmp(data, "noblocksend", 11) == 0) {
			vol->noblocksnd = 1;
		} else if (strnicmp(data, "noautotune", 10) == 0) {
			vol->noautotune = 1;
		} else if ((strnicmp(data, "suid", 4) == 0) ||
				   (strnicmp(data, "nosuid", 6) == 0) ||
				   (strnicmp(data, "exec", 4) == 0) ||
				   (strnicmp(data, "noexec", 6) == 0) ||
				   (strnicmp(data, "nodev", 5) == 0) ||
				   (strnicmp(data, "noauto", 6) == 0) ||
				   (strnicmp(data, "dev", 3) == 0)) {
			/*  The mount tool or mount.cifs helper (if present)
			    uses these opts to set flags, and the flags are read
			    by the kernel vfs layer before we get here (ie
			    before read super) so there is no point trying to
			    parse these options again and set anything and it
			    is ok to just ignore them */
			continue;
		} else if (strnicmp(data, "hard", 4) == 0) {
			vol->retry = 1;
		} else if (strnicmp(data, "soft", 4) == 0) {
			vol->retry = 0;
		} else if (strnicmp(data, "perm", 4) == 0) {
			vol->noperm = 0;
		} else if (strnicmp(data, "noperm", 6) == 0) {
			vol->noperm = 1;
		} else if (strnicmp(data, "mapchars", 8) == 0) {
			vol->remap = 1;
		} else if (strnicmp(data, "nomapchars", 10) == 0) {
			vol->remap = 0;
		} else if (strnicmp(data, "sfu", 3) == 0) {
			vol->sfu_emul = 1;
		} else if (strnicmp(data, "nosfu", 5) == 0) {
			vol->sfu_emul = 0;
		} else if (strnicmp(data, "nodfs", 5) == 0) {
			vol->nodfs = 1;
		} else if (strnicmp(data, "posixpaths", 10) == 0) {
			vol->posix_paths = 1;
		} else if (strnicmp(data, "noposixpaths", 12) == 0) {
			vol->posix_paths = 0;
		} else if (strnicmp(data, "nounix", 6) == 0) {
			vol->no_linux_ext = 1;
		} else if (strnicmp(data, "nolinux", 7) == 0) {
			vol->no_linux_ext = 1;
		} else if ((strnicmp(data, "nocase", 6) == 0) ||
			   (strnicmp(data, "ignorecase", 10)  == 0)) {
			vol->nocase = 1;
		} else if (strnicmp(data, "mand", 4) == 0) {
			/* ignore */
		} else if (strnicmp(data, "nomand", 6) == 0) {
			/* ignore */
		} else if (strnicmp(data, "_netdev", 7) == 0) {
			/* ignore */
		} else if (strnicmp(data, "brl", 3) == 0) {
			vol->nobrl =  0;
		} else if ((strnicmp(data, "nobrl", 5) == 0) ||
			   (strnicmp(data, "nolock", 6) == 0)) {
			vol->nobrl =  1;
			/* turn off mandatory locking in mode
			if remote locking is turned off since the
			local vfs will do advisory */
			if (vol->file_mode ==
				(S_IALLUGO & ~(S_ISUID | S_IXGRP)))
				vol->file_mode = S_IALLUGO;
		} else if (strnicmp(data, "forcemandatorylock", 9) == 0) {
			/* will take the shorter form "forcemand" as well */
			/* This mount option will force use of mandatory
			  (DOS/Windows style) byte range locks, instead of
			  using posix advisory byte range locks, even if the
			  Unix extensions are available and posix locks would
			  be supported otherwise. If Unix extensions are not
			  negotiated this has no effect since mandatory locks
			  would be used (mandatory locks is all that those
			  those servers support) */
			vol->mand_lock = 1;
		} else if (strnicmp(data, "setuids", 7) == 0) {
			vol->setuids = 1;
		} else if (strnicmp(data, "nosetuids", 9) == 0) {
			vol->setuids = 0;
		} else if (strnicmp(data, "dynperm", 7) == 0) {
			vol->dynperm = true;
		} else if (strnicmp(data, "nodynperm", 9) == 0) {
			vol->dynperm = false;
		} else if (strnicmp(data, "nohard", 6) == 0) {
			vol->retry = 0;
		} else if (strnicmp(data, "nosoft", 6) == 0) {
			vol->retry = 1;
		} else if (strnicmp(data, "nointr", 6) == 0) {
			vol->intr = 0;
		} else if (strnicmp(data, "intr", 4) == 0) {
			vol->intr = 1;
		} else if (strnicmp(data, "nostrictsync", 12) == 0) {
			vol->nostrictsync = 1;
		} else if (strnicmp(data, "strictsync", 10) == 0) {
			vol->nostrictsync = 0;
		} else if (strnicmp(data, "serverino", 7) == 0) {
			vol->server_ino = 1;
		} else if (strnicmp(data, "noserverino", 9) == 0) {
			vol->server_ino = 0;
		} else if (strnicmp(data, "cifsacl", 7) == 0) {
			vol->cifs_acl = 1;
		} else if (strnicmp(data, "nocifsacl", 9) == 0) {
			vol->cifs_acl = 0;
		} else if (strnicmp(data, "acl", 3) == 0) {
			vol->no_psx_acl = 0;
		} else if (strnicmp(data, "noacl", 5) == 0) {
			vol->no_psx_acl = 1;
		} else if (strnicmp(data, "locallease", 6) == 0) {
			vol->local_lease = 1;
		} else if (strnicmp(data, "sign", 4) == 0) {
			vol->secFlg |= CIFSSEC_MUST_SIGN;
		} else if (strnicmp(data, "seal", 4) == 0) {
			/* we do not do the following in secFlags because seal
			   is a per tree connection (mount) not a per socket
			   or per-smb connection option in the protocol */
			/* vol->secFlg |= CIFSSEC_MUST_SEAL; */
			vol->seal = 1;
		} else if (strnicmp(data, "direct", 6) == 0) {
			vol->direct_io = 1;
		} else if (strnicmp(data, "forcedirectio", 13) == 0) {
			vol->direct_io = 1;
		} else if (strnicmp(data, "strictcache", 11) == 0) {
			vol->strict_io = 1;
		} else if (strnicmp(data, "noac", 4) == 0) {
			printk(KERN_WARNING "CIFS: Mount option noac not "
				"supported. Instead set "
				"/proc/fs/cifs/LookupCacheEnabled to 0\n");
		} else if (strnicmp(data, "fsc", 3) == 0) {
#ifndef CONFIG_CIFS_FSCACHE
			cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE"
				  "kernel config option set");
			return 1;
#endif
			vol->fsc = true;
		} else if (strnicmp(data, "mfsymlinks", 10) == 0) {
			vol->mfsymlinks = true;
		} else if (strnicmp(data, "multiuser", 8) == 0) {
			vol->multiuser = true;
		} else
			printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
						data);
	}
	/* no unc= given: fall back to the devname (mount source) */
	if (vol->UNC == NULL) {
		if (devname == NULL) {
			printk(KERN_WARNING "CIFS: Missing UNC name for mount "
						"target\n");
			return 1;
		}
		if ((temp_len = strnlen(devname, 300)) < 300) {
			vol->UNC = kmalloc(temp_len+1, GFP_KERNEL);
			if (vol->UNC == NULL)
				return 1;
			strcpy(vol->UNC, devname);
			if (strncmp(vol->UNC, "//", 2) == 0) {
				vol->UNC[0] = '\\';
				vol->UNC[1] = '\\';
			} else if (strncmp(vol->UNC, "\\\\", 2) != 0) {
				printk(KERN_WARNING "CIFS: UNC Path does not "
						    "begin with // or \\\\ \n");
				return 1;
			}
			value = strpbrk(vol->UNC+2, "/\\");
			if (value)
				*value = '\\';
		} else {
			printk(KERN_WARNING "CIFS: UNC name too long\n");
			return 1;
		}
	}

	if (vol->multiuser && !(vol->secFlg & CIFSSEC_MAY_KRB5)) {
		cERROR(1, "Multiuser mounts currently require krb5 "
			  "authentication!");
		return 1;
	}

	if (vol->UNCip == NULL)
		vol->UNCip = &vol->UNC[2];

	if (uid_specified)
		vol->override_uid = override_uid;
	else if (override_uid == 1)
		printk(KERN_NOTICE "CIFS: ignoring forceuid mount option "
				   "specified with no uid= option.\n");

	if (gid_specified)
		vol->override_gid = override_gid;
	else if (override_gid == 1)
		printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
				   "specified with no gid= option.\n");

	return 0;
}

/** Returns true if srcaddr isn't specified and rhs isn't
 * specified, or if srcaddr is specified and
 * matches the IP address
of the rhs argument. */ static bool srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs) { switch (srcaddr->sa_family) { case AF_UNSPEC: return (rhs->sa_family == AF_UNSPEC); case AF_INET: { struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr; struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs; return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr); } case AF_INET6: { struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr; struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs; return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr); } default: WARN_ON(1); return false; /* don't expect to be here */ } } /* * If no port is specified in addr structure, we try to match with 445 port * and if it fails - with 139 ports. It should be called only if address * families of server and addr are equal. */ static bool match_port(struct TCP_Server_Info *server, struct sockaddr *addr) { __be16 port, *sport; switch (addr->sa_family) { case AF_INET: sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port; port = ((struct sockaddr_in *) addr)->sin_port; break; case AF_INET6: sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port; port = ((struct sockaddr_in6 *) addr)->sin6_port; break; default: WARN_ON(1); return false; } if (!port) { port = htons(CIFS_PORT); if (port == *sport) return true; port = htons(RFC1001_PORT); } return port == *sport; } static bool match_address(struct TCP_Server_Info *server, struct sockaddr *addr, struct sockaddr *srcaddr) { switch (addr->sa_family) { case AF_INET: { struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; struct sockaddr_in *srv_addr4 = (struct sockaddr_in *)&server->dstaddr; if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr) return false; break; } case AF_INET6: { struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; struct sockaddr_in6 *srv_addr6 = (struct sockaddr_in6 *)&server->dstaddr; if (!ipv6_addr_equal(&addr6->sin6_addr, &srv_addr6->sin6_addr)) return false; if 
(addr6->sin6_scope_id != srv_addr6->sin6_scope_id)
			return false;
		break;
	}
	default:
		WARN_ON(1);
		return false; /* don't expect to be here */
	}

	/* destination matched; the requested source address must match too */
	if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr))
		return false;

	return true;
}

/*
 * Decide whether the security type and signing mode already negotiated on
 * @server are acceptable for the mount request described by @vol, i.e.
 * whether the existing TCP session may be shared by this mount.
 */
static bool
match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
{
	unsigned int secFlags;

	/* explicit per-mount flags beyond MUST_SIGN/MUST_SEAL override the
	   global defaults; otherwise merge globals with the mount's flags */
	if (vol->secFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
		secFlags = vol->secFlg;
	else
		secFlags = global_secflags | vol->secFlg;

	/* the negotiated auth type must be one the mount allows */
	switch (server->secType) {
	case LANMAN:
		if (!(secFlags & (CIFSSEC_MAY_LANMAN|CIFSSEC_MAY_PLNTXT)))
			return false;
		break;
	case NTLMv2:
		if (!(secFlags & CIFSSEC_MAY_NTLMV2))
			return false;
		break;
	case NTLM:
		if (!(secFlags & CIFSSEC_MAY_NTLM))
			return false;
		break;
	case Kerberos:
		if (!(secFlags & CIFSSEC_MAY_KRB5))
			return false;
		break;
	case RawNTLMSSP:
		if (!(secFlags & CIFSSEC_MAY_NTLMSSP))
			return false;
		break;
	default:
		/* shouldn't happen */
		return false;
	}

	/* now check if signing mode is acceptable */
	if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
	    (server->secMode & SECMODE_SIGN_REQUIRED))
		return false;
	else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) &&
		 (server->secMode &
		  (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0)
		return false;

	return true;
}

/*
 * Look for an existing TCP session whose address, port and security
 * settings match the mount request. On a hit, take a reference
 * (srv_count) and return the server; otherwise return NULL.
 */
static struct TCP_Server_Info *
cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
{
	struct TCP_Server_Info *server;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		/* never share a socket across network namespaces */
		if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
			continue;

		if (!match_address(server, addr,
				   (struct sockaddr *)&vol->srcaddr))
			continue;

		if (!match_port(server, addr))
			continue;

		if (!match_security(server, vol))
			continue;

		++server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		cFYI(1, "Existing tcp session with server found");
		return server;
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}

/* Drop a reference to a TCP session; the last put tears the session down. */
static void
cifs_put_tcp_session(struct TCP_Server_Info *server)
{
	struct task_struct *task;

	spin_lock(&cifs_tcp_ses_lock);
	if
(--server->srv_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } put_net(cifs_net_ns(server)); list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); cancel_delayed_work_sync(&server->echo); spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); cifs_crypto_shash_release(server); cifs_fscache_release_client_cookie(server); kfree(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; task = xchg(&server->tsk, NULL); if (task) force_sig(SIGKILL, task); } static struct TCP_Server_Info * cifs_get_tcp_session(struct smb_vol *volume_info) { struct TCP_Server_Info *tcp_ses = NULL; struct sockaddr_storage addr; struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr; struct sockaddr_in6 *sin_server6 = (struct sockaddr_in6 *) &addr; int rc; memset(&addr, 0, sizeof(struct sockaddr_storage)); cFYI(1, "UNC: %s ip: %s", volume_info->UNC, volume_info->UNCip); if (volume_info->UNCip && volume_info->UNC) { rc = cifs_fill_sockaddr((struct sockaddr *)&addr, volume_info->UNCip, strlen(volume_info->UNCip), volume_info->port); if (!rc) { /* we failed translating address */ rc = -EINVAL; goto out_err; } } else if (volume_info->UNCip) { /* BB using ip addr as tcp_ses name to connect to the DFS root below */ cERROR(1, "Connecting to DFS root not implemented yet"); rc = -EINVAL; goto out_err; } else /* which tcp_sess DFS root would we conect to */ { cERROR(1, "CIFS mount error: No UNC path (e.g. 
-o " "unc=//192.168.1.100/public) specified"); rc = -EINVAL; goto out_err; } /* see if we already have a matching tcp_ses */ tcp_ses = cifs_find_tcp_session((struct sockaddr *)&addr, volume_info); if (tcp_ses) return tcp_ses; tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL); if (!tcp_ses) { rc = -ENOMEM; goto out_err; } rc = cifs_crypto_shash_allocate(tcp_ses); if (rc) { cERROR(1, "could not setup hash structures rc %d", rc); goto out_err; } cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); tcp_ses->hostname = extract_hostname(volume_info->UNC); if (IS_ERR(tcp_ses->hostname)) { rc = PTR_ERR(tcp_ses->hostname); goto out_err_crypto_release; } tcp_ses->noblocksnd = volume_info->noblocksnd; tcp_ses->noautotune = volume_info->noautotune; tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; atomic_set(&tcp_ses->inFlight, 0); init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); INIT_LIST_HEAD(&tcp_ses->pending_mid_q); mutex_init(&tcp_ses->srv_mutex); memcpy(tcp_ses->workstation_RFC1001_name, volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); memcpy(tcp_ses->server_RFC1001_name, volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); tcp_ses->session_estab = false; tcp_ses->sequence_number = 0; tcp_ses->lstrp = jiffies; INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); /* * at this point we are the only ones with the pointer * to the struct since the kernel thread not created yet * no need to spinlock this init of tcpStatus or srv_count */ tcp_ses->tcpStatus = CifsNew; memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr, sizeof(tcp_ses->srcaddr)); ++tcp_ses->srv_count; if (addr.ss_family == AF_INET6) { cFYI(1, "attempting ipv6 connect"); /* BB should we allow ipv6 on port 139? 
*/ /* other OS never observed in Wild doing 139 with v6 */ memcpy(&tcp_ses->dstaddr, sin_server6, sizeof(struct sockaddr_in6)); } else memcpy(&tcp_ses->dstaddr, sin_server, sizeof(struct sockaddr_in)); rc = ip_connect(tcp_ses); if (rc < 0) { cERROR(1, "Error connecting to socket. Aborting operation"); goto out_err_crypto_release; } /* * since we're in a cifs function already, we know that * this will succeed. No need for try_module_get(). */ __module_get(THIS_MODULE); tcp_ses->tsk = kthread_run((void *)(void *)cifs_demultiplex_thread, tcp_ses, "cifsd"); if (IS_ERR(tcp_ses->tsk)) { rc = PTR_ERR(tcp_ses->tsk); cERROR(1, "error %d create cifsd thread", rc); module_put(THIS_MODULE); goto out_err_crypto_release; } /* thread spawned, put it on the list */ spin_lock(&cifs_tcp_ses_lock); list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); cifs_fscache_get_client_cookie(tcp_ses); /* queue echo request delayed work */ queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL); return tcp_ses; out_err_crypto_release: cifs_crypto_shash_release(tcp_ses); put_net(cifs_net_ns(tcp_ses)); out_err: if (tcp_ses) { if (!IS_ERR(tcp_ses->hostname)) kfree(tcp_ses->hostname); if (tcp_ses->ssocket) sock_release(tcp_ses->ssocket); kfree(tcp_ses); } return ERR_PTR(rc); } static struct cifsSesInfo * cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) { struct cifsSesInfo *ses; spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { switch (server->secType) { case Kerberos: if (vol->cred_uid != ses->cred_uid) continue; break; default: /* anything else takes username/password */ if (ses->user_name == NULL) continue; if (strncmp(ses->user_name, vol->username, MAX_USERNAME_SIZE)) continue; if (strlen(vol->username) != 0 && ses->password != NULL && strncmp(ses->password, vol->password ? 
vol->password : "", MAX_PASSWORD_SIZE)) continue; } ++ses->ses_count; spin_unlock(&cifs_tcp_ses_lock); return ses; } spin_unlock(&cifs_tcp_ses_lock); return NULL; } static void cifs_put_smb_ses(struct cifsSesInfo *ses) { int xid; struct TCP_Server_Info *server = ses->server; cFYI(1, "%s: ses_count=%d\n", __func__, ses->ses_count); spin_lock(&cifs_tcp_ses_lock); if (--ses->ses_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } list_del_init(&ses->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); if (ses->status == CifsGood) { xid = GetXid(); CIFSSMBLogoff(xid, ses); _FreeXid(xid); } sesInfoFree(ses); cifs_put_tcp_session(server); } static struct cifsSesInfo * cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) { int rc = -ENOMEM, xid; struct cifsSesInfo *ses; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; xid = GetXid(); ses = cifs_find_smb_ses(server, volume_info); if (ses) { cFYI(1, "Existing smb sess found (status=%d)", ses->status); mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our ses reference */ cifs_put_smb_ses(ses); FreeXid(xid); return ERR_PTR(rc); } if (ses->need_reconnect) { cFYI(1, "Session needs reconnect"); rc = cifs_setup_session(xid, ses, volume_info->local_nls); if (rc) { mutex_unlock(&ses->session_mutex); /* problem -- put our reference */ cifs_put_smb_ses(ses); FreeXid(xid); return ERR_PTR(rc); } } mutex_unlock(&ses->session_mutex); /* existing SMB ses has a server reference already */ cifs_put_tcp_session(server); FreeXid(xid); return ses; } cFYI(1, "Existing smb sess not found"); ses = sesInfoAlloc(); if (ses == NULL) goto get_ses_fail; /* new SMB session uses our server ref */ ses->server = server; if (server->dstaddr.ss_family == AF_INET6) sprintf(ses->serverName, "%pI6", &addr6->sin6_addr); else sprintf(ses->serverName, "%pI4", 
&addr->sin_addr); if (volume_info->username) { ses->user_name = kstrdup(volume_info->username, GFP_KERNEL); if (!ses->user_name) goto get_ses_fail; } /* volume_info->password freed at unmount */ if (volume_info->password) { ses->password = kstrdup(volume_info->password, GFP_KERNEL); if (!ses->password) goto get_ses_fail; } if (volume_info->domainname) { ses->domainName = kstrdup(volume_info->domainname, GFP_KERNEL); if (!ses->domainName) goto get_ses_fail; } ses->cred_uid = volume_info->cred_uid; ses->linux_uid = volume_info->linux_uid; ses->overrideSecFlg = volume_info->secFlg; mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (!rc) rc = cifs_setup_session(xid, ses, volume_info->local_nls); mutex_unlock(&ses->session_mutex); if (rc) goto get_ses_fail; /* success, put it on the list */ spin_lock(&cifs_tcp_ses_lock); list_add(&ses->smb_ses_list, &server->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); FreeXid(xid); return ses; get_ses_fail: sesInfoFree(ses); FreeXid(xid); return ERR_PTR(rc); } static struct cifsTconInfo * cifs_find_tcon(struct cifsSesInfo *ses, const char *unc) { struct list_head *tmp; struct cifsTconInfo *tcon; spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &ses->tcon_list) { tcon = list_entry(tmp, struct cifsTconInfo, tcon_list); if (tcon->tidStatus == CifsExiting) continue; if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE)) continue; ++tcon->tc_count; spin_unlock(&cifs_tcp_ses_lock); return tcon; } spin_unlock(&cifs_tcp_ses_lock); return NULL; } static void cifs_put_tcon(struct cifsTconInfo *tcon) { int xid; struct cifsSesInfo *ses = tcon->ses; cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count); spin_lock(&cifs_tcp_ses_lock); if (--tcon->tc_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } list_del_init(&tcon->tcon_list); spin_unlock(&cifs_tcp_ses_lock); xid = GetXid(); CIFSSMBTDis(xid, tcon); _FreeXid(xid); cifs_fscache_release_super_cookie(tcon); tconInfoFree(tcon); cifs_put_smb_ses(ses); } static struct 
cifsTconInfo * cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info) { int rc, xid; struct cifsTconInfo *tcon; tcon = cifs_find_tcon(ses, volume_info->UNC); if (tcon) { cFYI(1, "Found match on UNC path"); /* existing tcon already has a reference */ cifs_put_smb_ses(ses); if (tcon->seal != volume_info->seal) cERROR(1, "transport encryption setting " "conflicts with existing tid"); return tcon; } tcon = tconInfoAlloc(); if (tcon == NULL) { rc = -ENOMEM; goto out_fail; } tcon->ses = ses; if (volume_info->password) { tcon->password = kstrdup(volume_info->password, GFP_KERNEL); if (!tcon->password) { rc = -ENOMEM; goto out_fail; } } if (strchr(volume_info->UNC + 3, '\\') == NULL && strchr(volume_info->UNC + 3, '/') == NULL) { cERROR(1, "Missing share name"); rc = -ENODEV; goto out_fail; } /* BB Do we need to wrap session_mutex around * this TCon call and Unix SetFS as * we do on SessSetup and reconnect? */ xid = GetXid(); rc = CIFSTCon(xid, ses, volume_info->UNC, tcon, volume_info->local_nls); FreeXid(xid); cFYI(1, "CIFS Tcon rc = %d", rc); if (rc) goto out_fail; if (volume_info->nodfs) { tcon->Flags &= ~SMB_SHARE_IS_IN_DFS; cFYI(1, "DFS disabled (%d)", tcon->Flags); } tcon->seal = volume_info->seal; /* we can have only one retry value for a connection to a share so for resources mounted more than once to the same server share the last value passed in for the retry flag is used */ tcon->retry = volume_info->retry; tcon->nocase = volume_info->nocase; tcon->local_lease = volume_info->local_lease; spin_lock(&cifs_tcp_ses_lock); list_add(&tcon->tcon_list, &ses->tcon_list); spin_unlock(&cifs_tcp_ses_lock); cifs_fscache_get_super_cookie(tcon); return tcon; out_fail: tconInfoFree(tcon); return ERR_PTR(rc); } void cifs_put_tlink(struct tcon_link *tlink) { if (!tlink || IS_ERR(tlink)) return; if (!atomic_dec_and_test(&tlink->tl_count) || test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) { tlink->tl_time = jiffies; return; } if (!IS_ERR(tlink_tcon(tlink))) 
cifs_put_tcon(tlink_tcon(tlink)); kfree(tlink); return; } int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path, const struct nls_table *nls_codepage, unsigned int *pnum_referrals, struct dfs_info3_param **preferrals, int remap) { char *temp_unc; int rc = 0; *pnum_referrals = 0; *preferrals = NULL; if (pSesInfo->ipc_tid == 0) { temp_unc = kmalloc(2 /* for slashes */ + strnlen(pSesInfo->serverName, SERVER_NAME_LEN_WITH_NULL * 2) + 1 + 4 /* slash IPC$ */ + 2, GFP_KERNEL); if (temp_unc == NULL) return -ENOMEM; temp_unc[0] = '\\'; temp_unc[1] = '\\'; strcpy(temp_unc + 2, pSesInfo->serverName); strcpy(temp_unc + 2 + strlen(pSesInfo->serverName), "\\IPC$"); rc = CIFSTCon(xid, pSesInfo, temp_unc, NULL, nls_codepage); cFYI(1, "CIFS Tcon rc = %d ipc_tid = %d", rc, pSesInfo->ipc_tid); kfree(temp_unc); } if (rc == 0) rc = CIFSGetDFSRefer(xid, pSesInfo, old_path, preferrals, pnum_referrals, nls_codepage, remap); /* BB map targetUNCs to dfs_info3 structures, here or in CIFSGetDFSRefer BB */ return rc; } #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key cifs_key[2]; static struct lock_class_key cifs_slock_key[2]; static inline void cifs_reclassify_socket4(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(sock_owned_by_user(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS", &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]); } static inline void cifs_reclassify_socket6(struct socket *sock) { struct sock *sk = sock->sk; BUG_ON(sock_owned_by_user(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS", &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]); } #else static inline void cifs_reclassify_socket4(struct socket *sock) { } static inline void cifs_reclassify_socket6(struct socket *sock) { } #endif /* See RFC1001 section 14 on representation of Netbios names */ static void rfc1002mangle(char *target, char *source, unsigned int length) { unsigned int i, j; for (i = 0, j = 0; i < (length); i++) { /* mask a 
nibble at a time and encode */
		target[j] = 'A' + (0x0F & (source[i] >> 4));
		target[j+1] = 'A' + (0x0F & source[i]);
		j += 2;
	}

}

/*
 * Bind the server socket to the local source address requested via the
 * srcaddr= mount option, if one was given. Returns 0 when no source
 * address was specified or the bind succeeded, otherwise the error from
 * the bind() call (which is also logged).
 */
static int
bind_socket(struct TCP_Server_Info *server)
{
	int rc = 0;
	if (server->srcaddr.ss_family != AF_UNSPEC) {
		/* Bind to the specified local IP address */
		struct socket *socket = server->ssocket;
		rc = socket->ops->bind(socket,
				       (struct sockaddr *) &server->srcaddr,
				       sizeof(server->srcaddr));
		if (rc < 0) {
			struct sockaddr_in *saddr4;
			struct sockaddr_in6 *saddr6;
			saddr4 = (struct sockaddr_in *)&server->srcaddr;
			saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
			if (saddr6->sin6_family == AF_INET6)
				cERROR(1, "cifs: "
				       "Failed to bind to: %pI6c, error: %d\n",
				       &saddr6->sin6_addr, rc);
			else
				cERROR(1, "cifs: "
				       "Failed to bind to: %pI4, error: %d\n",
				       &saddr4->sin_addr.s_addr, rc);
		}
	}
	return rc;
}

/*
 * Send an RFC 1001 session request (NetBIOS session setup) before the SMB
 * negotiate, which some servers require when connecting on port 139. The
 * called/calling names are RFC 1002 "mangled" (half-ASCII) encodings.
 */
static int
ip_rfc1001_connect(struct TCP_Server_Info *server)
{
	int rc = 0;
	/*
	 * some servers require RFC1001 sessinit before sending
	 * negprot - BB check reconnection in case where second
	 * sessinit is sent but no second negprot
	 */
	struct rfc1002_session_packet *ses_init_buf;
	struct smb_hdr *smb_buf;
	ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet),
			       GFP_KERNEL);
	if (ses_init_buf) {
		ses_init_buf->trailer.session_req.called_len = 32;
		if (server->server_RFC1001_name &&
		    server->server_RFC1001_name[0] != 0)
			rfc1002mangle(ses_init_buf->trailer.
				      session_req.called_name,
				      server->server_RFC1001_name,
				      RFC1001_NAME_LEN_WITH_NULL);
		else
			rfc1002mangle(ses_init_buf->trailer.
				      session_req.called_name,
				      DEFAULT_CIFS_CALLED_NAME,
				      RFC1001_NAME_LEN_WITH_NULL);

		ses_init_buf->trailer.session_req.calling_len = 32;

		/*
		 * calling name ends in null (byte 16) from old smb
		 * convention.
		 */
		if (server->workstation_RFC1001_name &&
		    server->workstation_RFC1001_name[0] != 0)
			rfc1002mangle(ses_init_buf->trailer.
				      session_req.calling_name,
				      server->workstation_RFC1001_name,
				      RFC1001_NAME_LEN_WITH_NULL);
		else
			rfc1002mangle(ses_init_buf->trailer.
session_req.calling_name, "LINUX_CIFS_CLNT", RFC1001_NAME_LEN_WITH_NULL); ses_init_buf->trailer.session_req.scope1 = 0; ses_init_buf->trailer.session_req.scope2 = 0; smb_buf = (struct smb_hdr *)ses_init_buf; /* sizeof RFC1002_SESSION_REQUEST with no scope */ smb_buf->smb_buf_length = 0x81000044; rc = smb_send(server, smb_buf, 0x44); kfree(ses_init_buf); /* * RFC1001 layer in at least one server * requires very short break before negprot * presumably because not expecting negprot * to follow so fast. This is a simple * solution that works without * complicating the code and causes no * significant slowing down on mount * for everyone else */ usleep_range(1000, 2000); } /* * else the negprot may still work without this * even though malloc failed */ return rc; } static int generic_ip_connect(struct TCP_Server_Info *server) { int rc = 0; __be16 sport; int slen, sfamily; struct socket *socket = server->ssocket; struct sockaddr *saddr; saddr = (struct sockaddr *) &server->dstaddr; if (server->dstaddr.ss_family == AF_INET6) { sport = ((struct sockaddr_in6 *) saddr)->sin6_port; slen = sizeof(struct sockaddr_in6); sfamily = AF_INET6; } else { sport = ((struct sockaddr_in *) saddr)->sin_port; slen = sizeof(struct sockaddr_in); sfamily = AF_INET; } if (socket == NULL) { rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM, IPPROTO_TCP, &socket, 1); if (rc < 0) { cERROR(1, "Error %d creating socket", rc); server->ssocket = NULL; return rc; } /* BB other socket options to set KEEPALIVE, NODELAY? 
*/ cFYI(1, "Socket created"); server->ssocket = socket; socket->sk->sk_allocation = GFP_NOFS; if (sfamily == AF_INET6) cifs_reclassify_socket6(socket); else cifs_reclassify_socket4(socket); } rc = bind_socket(server); if (rc < 0) return rc; rc = socket->ops->connect(socket, saddr, slen, 0); if (rc < 0) { cFYI(1, "Error %d connecting to server", rc); sock_release(socket); server->ssocket = NULL; return rc; } /* * Eventually check for other socket options to change from * the default. sock_setsockopt not used because it expects * user space buffer */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; /* make the bufsizes depend on wsize/rsize and max requests */ if (server->noautotune) { if (socket->sk->sk_sndbuf < (200 * 1024)) socket->sk->sk_sndbuf = 200 * 1024; if (socket->sk->sk_rcvbuf < (140 * 1024)) socket->sk->sk_rcvbuf = 140 * 1024; } if (server->tcp_nodelay) { int val = 1; rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, (char *)&val, sizeof(val)); if (rc) cFYI(1, "set TCP_NODELAY socket option error %d", rc); } cFYI(1, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx", socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); if (sport == htons(RFC1001_PORT)) rc = ip_rfc1001_connect(server); return rc; } static int ip_connect(struct TCP_Server_Info *server) { __be16 *sport; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; if (server->dstaddr.ss_family == AF_INET6) sport = &addr6->sin6_port; else sport = &addr->sin_port; if (*sport == 0) { int rc; /* try with 445 port at first */ *sport = htons(CIFS_PORT); rc = generic_ip_connect(server); if (rc >= 0) return rc; /* if it failed, try with 139 port */ *sport = htons(RFC1001_PORT); } return generic_ip_connect(server); } void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, struct super_block *sb, struct smb_vol *vol_info) { /* if we are reconnecting then should we check to see if * any requested 
capabilities changed locally e.g. via * remount but we can not do much about it here * if they have (even if we could detect it by the following) * Perhaps we could add a backpointer to array of sb from tcon * or if we change to make all sb to same share the same * sb as NFS - then we only have one backpointer to sb. * What if we wanted to mount the server share twice once with * and once without posixacls or posix paths? */ __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); if (vol_info && vol_info->no_linux_ext) { tcon->fsUnixInfo.Capability = 0; tcon->unix_ext = 0; /* Unix Extensions disabled */ cFYI(1, "Linux protocol extensions disabled"); return; } else if (vol_info) tcon->unix_ext = 1; /* Unix Extensions supported */ if (tcon->unix_ext == 0) { cFYI(1, "Unix extensions disabled so not set on reconnect"); return; } if (!CIFSSMBQFSUnixInfo(xid, tcon)) { __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); /* check for reconnect case in which we do not want to change the mount behavior if we can avoid it */ if (vol_info == NULL) { /* turn off POSIX ACL and PATHNAMES if not set originally at mount time */ if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cERROR(1, "POSIXPATH support change"); cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) { cERROR(1, "possible reconnect error"); cERROR(1, "server disabled POSIX path support"); } } cap &= CIFS_UNIX_CAP_MASK; if (vol_info && vol_info->no_psx_acl) cap &= ~CIFS_UNIX_POSIX_ACL_CAP; else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { cFYI(1, "negotiated posix acl support"); if (sb) sb->s_flags |= MS_POSIXACL; } if (vol_info && vol_info->posix_paths == 0) cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { cFYI(1, "negotiate posix pathnames"); if (sb) CIFS_SB(sb)->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; } /* We 
might be setting the path sep back to a different form if we are reconnecting and the server switched its posix path capability for this share */ if (sb && (CIFS_SB(sb)->prepathlen > 0)) CIFS_SB(sb)->prepath[0] = CIFS_DIR_SEP(CIFS_SB(sb)); if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { CIFS_SB(sb)->rsize = 127 * 1024; cFYI(DBG2, "larger reads not supported by srv"); } } cFYI(1, "Negotiate caps 0x%x", (int)cap); #ifdef CONFIG_CIFS_DEBUG2 if (cap & CIFS_UNIX_FCNTL_CAP) cFYI(1, "FCNTL cap"); if (cap & CIFS_UNIX_EXTATTR_CAP) cFYI(1, "EXTATTR cap"); if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) cFYI(1, "POSIX path cap"); if (cap & CIFS_UNIX_XATTR_CAP) cFYI(1, "XATTR cap"); if (cap & CIFS_UNIX_POSIX_ACL_CAP) cFYI(1, "POSIX ACL cap"); if (cap & CIFS_UNIX_LARGE_READ_CAP) cFYI(1, "very large read cap"); if (cap & CIFS_UNIX_LARGE_WRITE_CAP) cFYI(1, "very large write cap"); #endif /* CIFS_DEBUG2 */ if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { if (vol_info == NULL) { cFYI(1, "resetting capabilities failed"); } else cERROR(1, "Negotiating Unix capabilities " "with the server failed. 
Consider " "mounting with the Unix Extensions\n" "disabled, if problems are found, " "by specifying the nounix mount " "option."); } } } static void convert_delimiter(char *path, char delim) { int i; char old_delim; if (path == NULL) return; if (delim == '/') old_delim = '\\'; else old_delim = '/'; for (i = 0; path[i] != '\0'; i++) { if (path[i] == old_delim) path[i] = delim; } } static void setup_cifs_sb(struct smb_vol *pvolume_info, struct cifs_sb_info *cifs_sb) { INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); if (pvolume_info->rsize > CIFSMaxBufSize) { cERROR(1, "rsize %d too large, using MaxBufSize", pvolume_info->rsize); cifs_sb->rsize = CIFSMaxBufSize; } else if ((pvolume_info->rsize) && (pvolume_info->rsize <= CIFSMaxBufSize)) cifs_sb->rsize = pvolume_info->rsize; else /* default */ cifs_sb->rsize = CIFSMaxBufSize; if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) { cERROR(1, "wsize %d too large, using 4096 instead", pvolume_info->wsize); cifs_sb->wsize = 4096; } else if (pvolume_info->wsize) cifs_sb->wsize = pvolume_info->wsize; else cifs_sb->wsize = min_t(const int, PAGEVEC_SIZE * PAGE_CACHE_SIZE, 127*1024); /* old default of CIFSMaxBufSize was too small now that SMB Write2 can send multiple pages in kvec. RFC1001 does not describe what happens when frame bigger than 128K is sent so use that as max in conjunction with 52K kvec constraint on arch with 4K page size */ if (cifs_sb->rsize < 2048) { cifs_sb->rsize = 2048; /* Windows ME may prefer this */ cFYI(1, "readsize set to minimum: 2048"); } /* calculate prepath */ cifs_sb->prepath = pvolume_info->prepath; if (cifs_sb->prepath) { cifs_sb->prepathlen = strlen(cifs_sb->prepath); /* we can not convert the / to \ in the path separators in the prefixpath yet because we do not know (until reset_cifs_unix_caps is called later) whether POSIX PATH CAP is available. 
We normalize the / to \ after reset_cifs_unix_caps is called */ pvolume_info->prepath = NULL; } else cifs_sb->prepathlen = 0; cifs_sb->mnt_uid = pvolume_info->linux_uid; cifs_sb->mnt_gid = pvolume_info->linux_gid; cifs_sb->mnt_file_mode = pvolume_info->file_mode; cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; cFYI(1, "file mode: 0x%x dir mode: 0x%x", cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); cifs_sb->actimeo = pvolume_info->actimeo; if (pvolume_info->noperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; if (pvolume_info->setuids) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID; if (pvolume_info->server_ino) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM; if (pvolume_info->remap) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR; if (pvolume_info->no_xattr) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR; if (pvolume_info->sfu_emul) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; if (pvolume_info->nobrl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; if (pvolume_info->nostrictsync) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC; if (pvolume_info->mand_lock) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL; if (pvolume_info->cifs_acl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; if (pvolume_info->override_uid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; if (pvolume_info->override_gid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; if (pvolume_info->dynperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; if (pvolume_info->fsc) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE; if (pvolume_info->multiuser) cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_NO_PERM); if (pvolume_info->strict_io) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO; if (pvolume_info->direct_io) { cFYI(1, "mounting share using direct i/o"); cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; } if (pvolume_info->mfsymlinks) { if (pvolume_info->sfu_emul) { cERROR(1, "mount option mfsymlinks ignored if sfu " "mount option is used"); } else { cifs_sb->mnt_cifs_flags |= 
CIFS_MOUNT_MF_SYMLINKS; } } if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) cERROR(1, "mount option dynperm ignored if cifsacl " "mount option supported"); } static int is_path_accessible(int xid, struct cifsTconInfo *tcon, struct cifs_sb_info *cifs_sb, const char *full_path) { int rc; FILE_ALL_INFO *pfile_info; pfile_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (pfile_info == NULL) return -ENOMEM; rc = CIFSSMBQPathInfo(xid, tcon, full_path, pfile_info, 0 /* not legacy */, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); kfree(pfile_info); return rc; } static void cleanup_volume_info(struct smb_vol **pvolume_info) { struct smb_vol *volume_info; if (!pvolume_info || !*pvolume_info) return; volume_info = *pvolume_info; kzfree(volume_info->password); kfree(volume_info->UNC); kfree(volume_info->prepath); kfree(volume_info); *pvolume_info = NULL; return; } #ifdef CONFIG_CIFS_DFS_UPCALL /* build_path_to_root returns full path to root when * we do not have an exiting connection (tcon) */ static char * build_unc_path_to_root(const struct smb_vol *volume_info, const struct cifs_sb_info *cifs_sb) { char *full_path; int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1); full_path = kmalloc(unc_len + cifs_sb->prepathlen + 1, GFP_KERNEL); if (full_path == NULL) return ERR_PTR(-ENOMEM); strncpy(full_path, volume_info->UNC, unc_len); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { int i; for (i = 0; i < unc_len; i++) { if (full_path[i] == '\\') full_path[i] = '/'; } } if (cifs_sb->prepathlen) strncpy(full_path + unc_len, cifs_sb->prepath, cifs_sb->prepathlen); full_path[unc_len + cifs_sb->prepathlen] = 0; /* add trailing null */ return full_path; } #endif int cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, char *mount_data_global, const char *devname) { int rc; int xid; struct smb_vol *volume_info; struct cifsSesInfo *pSesInfo; struct cifsTconInfo *tcon; struct TCP_Server_Info *srvTcp; char *full_path; char 
*mount_data = mount_data_global; struct tcon_link *tlink; #ifdef CONFIG_CIFS_DFS_UPCALL struct dfs_info3_param *referrals = NULL; unsigned int num_referrals = 0; int referral_walks_count = 0; try_mount_again: #endif rc = 0; tcon = NULL; pSesInfo = NULL; srvTcp = NULL; full_path = NULL; tlink = NULL; xid = GetXid(); volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL); if (!volume_info) { rc = -ENOMEM; goto out; } if (cifs_parse_mount_options(mount_data, devname, volume_info)) { rc = -EINVAL; goto out; } if (volume_info->nullauth) { cFYI(1, "null user"); volume_info->username = ""; } else if (volume_info->username) { /* BB fixme parse for domain name here */ cFYI(1, "Username: %s", volume_info->username); } else { cifserror("No username specified"); /* In userspace mount helper we can get user name from alternate locations such as env variables and files on disk */ rc = -EINVAL; goto out; } /* this is needed for ASCII cp to Unicode converts */ if (volume_info->iocharset == NULL) { /* load_nls_default cannot return null */ volume_info->local_nls = load_nls_default(); } else { volume_info->local_nls = load_nls(volume_info->iocharset); if (volume_info->local_nls == NULL) { cERROR(1, "CIFS mount error: iocharset %s not found", volume_info->iocharset); rc = -ELIBACC; goto out; } } cifs_sb->local_nls = volume_info->local_nls; /* get a reference to a tcp session */ srvTcp = cifs_get_tcp_session(volume_info); if (IS_ERR(srvTcp)) { rc = PTR_ERR(srvTcp); goto out; } /* get a reference to a SMB session */ pSesInfo = cifs_get_smb_ses(srvTcp, volume_info); if (IS_ERR(pSesInfo)) { rc = PTR_ERR(pSesInfo); pSesInfo = NULL; goto mount_fail_check; } setup_cifs_sb(volume_info, cifs_sb); if (pSesInfo->capabilities & CAP_LARGE_FILES) sb->s_maxbytes = MAX_LFS_FILESIZE; else sb->s_maxbytes = MAX_NON_LFS; /* BB FIXME fix time_gran to be larger for LANMAN sessions */ sb->s_time_gran = 100; /* search for existing tcon to this server share */ tcon = cifs_get_tcon(pSesInfo, volume_info); 
if (IS_ERR(tcon)) { rc = PTR_ERR(tcon); tcon = NULL; goto remote_path_check; } /* do not care if following two calls succeed - informational */ if (!tcon->ipc) { CIFSSMBQFSDeviceInfo(xid, tcon); CIFSSMBQFSAttributeInfo(xid, tcon); } /* tell server which Unix caps we support */ if (tcon->ses->capabilities & CAP_UNIX) /* reset of caps checks mount to see if unix extensions disabled for just this mount */ reset_cifs_unix_caps(xid, tcon, sb, volume_info); else tcon->unix_ext = 0; /* server does not support them */ /* convert forward to back slashes in prepath here if needed */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { cifs_sb->rsize = 1024 * 127; cFYI(DBG2, "no very large read support, rsize now 127K"); } if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) cifs_sb->wsize = min(cifs_sb->wsize, (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); if (!(tcon->ses->capabilities & CAP_LARGE_READ_X)) cifs_sb->rsize = min(cifs_sb->rsize, (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); remote_path_check: /* check if a whole path (including prepath) is not remote */ if (!rc && tcon) { /* build_path_to_root works only when we have a valid tcon */ full_path = cifs_build_path_to_root(cifs_sb, tcon); if (full_path == NULL) { rc = -ENOMEM; goto mount_fail_check; } rc = is_path_accessible(xid, tcon, cifs_sb, full_path); if (rc != 0 && rc != -EREMOTE) { kfree(full_path); goto mount_fail_check; } kfree(full_path); } /* get referral if needed */ if (rc == -EREMOTE) { #ifdef CONFIG_CIFS_DFS_UPCALL if (referral_walks_count > MAX_NESTED_LINKS) { /* * BB: when we implement proper loop detection, * we will remove this check. But now we need it * to prevent an indefinite loop if 'DFS tree' is * misconfigured (i.e. has loops). 
*/ rc = -ELOOP; goto mount_fail_check; } /* convert forward to back slashes in prepath here if needed */ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); full_path = build_unc_path_to_root(volume_info, cifs_sb); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); goto mount_fail_check; } cFYI(1, "Getting referral for: %s", full_path); rc = get_dfs_path(xid, pSesInfo , full_path + 1, cifs_sb->local_nls, &num_referrals, &referrals, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (!rc && num_referrals > 0) { char *fake_devname = NULL; if (mount_data != mount_data_global) kfree(mount_data); mount_data = cifs_compose_mount_options( cifs_sb->mountdata, full_path + 1, referrals, &fake_devname); free_dfs_info_array(referrals, num_referrals); kfree(fake_devname); kfree(full_path); if (IS_ERR(mount_data)) { rc = PTR_ERR(mount_data); mount_data = NULL; goto mount_fail_check; } if (tcon) cifs_put_tcon(tcon); else if (pSesInfo) cifs_put_smb_ses(pSesInfo); cleanup_volume_info(&volume_info); referral_walks_count++; FreeXid(xid); goto try_mount_again; } #else /* No DFS support, return error on mount */ rc = -EOPNOTSUPP; #endif } if (rc) goto mount_fail_check; /* now, hang the tcon off of the superblock */ tlink = kzalloc(sizeof *tlink, GFP_KERNEL); if (tlink == NULL) { rc = -ENOMEM; goto mount_fail_check; } tlink->tl_uid = pSesInfo->linux_uid; tlink->tl_tcon = tcon; tlink->tl_time = jiffies; set_bit(TCON_LINK_MASTER, &tlink->tl_flags); set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); cifs_sb->master_tlink = tlink; spin_lock(&cifs_sb->tlink_tree_lock); tlink_rb_insert(&cifs_sb->tlink_tree, tlink); spin_unlock(&cifs_sb->tlink_tree_lock); queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks, TLINK_IDLE_EXPIRE); mount_fail_check: /* on error free sesinfo and tcon struct if needed */ if (rc) { if (mount_data != mount_data_global) kfree(mount_data); /* If find_unc succeeded then rc == 0 so we can not end */ 
/* up accidentally freeing someone elses tcon struct */ if (tcon) cifs_put_tcon(tcon); else if (pSesInfo) cifs_put_smb_ses(pSesInfo); else cifs_put_tcp_session(srvTcp); goto out; } /* volume_info->password is freed above when existing session found (in which case it is not needed anymore) but when new sesion is created the password ptr is put in the new session structure (in which case the password will be freed at unmount time) */ out: /* zero out password before freeing */ cleanup_volume_info(&volume_info); FreeXid(xid); return rc; } int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, const char *tree, struct cifsTconInfo *tcon, const struct nls_table *nls_codepage) { struct smb_hdr *smb_buffer; struct smb_hdr *smb_buffer_response; TCONX_REQ *pSMB; TCONX_RSP *pSMBr; unsigned char *bcc_ptr; int rc = 0; int length; __u16 bytes_left, count; if (ses == NULL) return -EIO; smb_buffer = cifs_buf_get(); if (smb_buffer == NULL) return -ENOMEM; smb_buffer_response = smb_buffer; header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, NULL /*no tid */ , 4 /*wct */ ); smb_buffer->Mid = GetNextMid(ses->server); smb_buffer->Uid = ses->Suid; pSMB = (TCONX_REQ *) smb_buffer; pSMBr = (TCONX_RSP *) smb_buffer_response; pSMB->AndXCommand = 0xFF; pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); bcc_ptr = &pSMB->Password[0]; if ((ses->server->secMode) & SECMODE_USER) { pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ *bcc_ptr = 0; /* password is null byte */ bcc_ptr++; /* skip password */ /* already aligned so no need to do it below */ } else { pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* BB FIXME add code to fail this if NTLMv2 or Kerberos specified as required (when that support is added to the vfs in the future) as only NTLM or the much weaker LANMAN (which we do not send by default) is accepted by Samba (not sure whether other servers allow NTLMv2 password here) */ #ifdef CONFIG_CIFS_WEAK_PW_HASH if ((global_secflags & CIFSSEC_MAY_LANMAN) && 
(ses->server->secType == LANMAN)) calc_lanman_hash(tcon->password, ses->server->cryptkey, ses->server->secMode & SECMODE_PW_ENCRYPT ? true : false, bcc_ptr); else #endif /* CIFS_WEAK_PW_HASH */ rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr); bcc_ptr += CIFS_AUTH_RESP_SIZE; if (ses->capabilities & CAP_UNICODE) { /* must align unicode strings */ *bcc_ptr = 0; /* null byte password */ bcc_ptr++; } } if (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; if (ses->capabilities & CAP_STATUS32) { smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS; } if (ses->capabilities & CAP_DFS) { smb_buffer->Flags2 |= SMBFLG2_DFS; } if (ses->capabilities & CAP_UNICODE) { smb_buffer->Flags2 |= SMBFLG2_UNICODE; length = cifs_strtoUCS((__le16 *) bcc_ptr, tree, 6 /* max utf8 char length in bytes */ * (/* server len*/ + 256 /* share len */), nls_codepage); bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */ bcc_ptr += 2; /* skip trailing null */ } else { /* ASCII */ strcpy(bcc_ptr, tree); bcc_ptr += strlen(tree) + 1; } strcpy(bcc_ptr, "?????"); bcc_ptr += strlen("?????"); bcc_ptr += 1; count = bcc_ptr - &pSMB->Password[0]; pSMB->hdr.smb_buf_length += count; pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, 0); /* above now done in SendReceive */ if ((rc == 0) && (tcon != NULL)) { bool is_unicode; tcon->tidStatus = CifsGood; tcon->need_reconnect = false; tcon->tid = smb_buffer_response->Tid; bcc_ptr = pByteArea(smb_buffer_response); bytes_left = get_bcc(smb_buffer_response); length = strnlen(bcc_ptr, bytes_left - 2); if (smb_buffer->Flags2 & SMBFLG2_UNICODE) is_unicode = true; else is_unicode = false; /* skip service field (NB: this field is always ASCII) */ if (length == 3) { if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') && (bcc_ptr[2] == 'C')) { cFYI(1, "IPC connection"); tcon->ipc = 1; } } else if (length == 2) { if ((bcc_ptr[0] == 'A') && 
(bcc_ptr[1] == ':')) { /* the most common case */ cFYI(1, "disk share connection"); } } bcc_ptr += length + 1; bytes_left -= (length + 1); strncpy(tcon->treeName, tree, MAX_TREE_SIZE); /* mostly informational -- no need to fail on error here */ kfree(tcon->nativeFileSystem); tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr, bytes_left, is_unicode, nls_codepage); cFYI(1, "nativeFileSystem=%s", tcon->nativeFileSystem); if ((smb_buffer_response->WordCount == 3) || (smb_buffer_response->WordCount == 7)) /* field is in same location */ tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport); else tcon->Flags = 0; cFYI(1, "Tcon flags: 0x%x ", tcon->Flags); } else if ((rc == 0) && tcon == NULL) { /* all we need to save for IPC$ connection */ ses->ipc_tid = smb_buffer_response->Tid; } cifs_buf_release(smb_buffer); return rc; } int cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) { struct rb_root *root = &cifs_sb->tlink_tree; struct rb_node *node; struct tcon_link *tlink; char *tmp; cancel_delayed_work_sync(&cifs_sb->prune_tlinks); spin_lock(&cifs_sb->tlink_tree_lock); while ((node = rb_first(root))) { tlink = rb_entry(node, struct tcon_link, tl_rbnode); cifs_get_tlink(tlink); clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags); rb_erase(node, root); spin_unlock(&cifs_sb->tlink_tree_lock); cifs_put_tlink(tlink); spin_lock(&cifs_sb->tlink_tree_lock); } spin_unlock(&cifs_sb->tlink_tree_lock); tmp = cifs_sb->prepath; cifs_sb->prepathlen = 0; cifs_sb->prepath = NULL; kfree(tmp); return 0; } int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses) { int rc = 0; struct TCP_Server_Info *server = ses->server; /* only send once per connect */ if (server->maxBuf != 0) return 0; rc = CIFSSMBNegotiate(xid, ses); if (rc == -EAGAIN) { /* retry only once on 1st time connection */ rc = CIFSSMBNegotiate(xid, ses); if (rc == -EAGAIN) rc = -EHOSTDOWN; } if (rc == 0) { spin_lock(&GlobalMid_Lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsGood; else 
rc = -EHOSTDOWN; spin_unlock(&GlobalMid_Lock); } return rc; } int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, struct nls_table *nls_info) { int rc = 0; struct TCP_Server_Info *server = ses->server; ses->flags = 0; ses->capabilities = server->capabilities; if (linuxExtEnabled == 0) ses->capabilities &= (~CAP_UNIX); cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d", server->secMode, server->capabilities, server->timeAdj); rc = CIFS_SessSetup(xid, ses, nls_info); if (rc) { cERROR(1, "Send error in SessSetup = %d", rc); } else { mutex_lock(&ses->server->srv_mutex); if (!server->session_estab) { server->session_key.response = ses->auth_key.response; server->session_key.len = ses->auth_key.len; server->sequence_number = 0x2; server->session_estab = true; ses->auth_key.response = NULL; } mutex_unlock(&server->srv_mutex); cFYI(1, "CIFS Session Established successfully"); spin_lock(&GlobalMid_Lock); ses->status = CifsGood; ses->need_reconnect = false; spin_unlock(&GlobalMid_Lock); } kfree(ses->auth_key.response); ses->auth_key.response = NULL; ses->auth_key.len = 0; kfree(ses->ntlmssp); ses->ntlmssp = NULL; return rc; } static struct cifsTconInfo * cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) { struct cifsTconInfo *master_tcon = cifs_sb_master_tcon(cifs_sb); struct cifsSesInfo *ses; struct cifsTconInfo *tcon = NULL; struct smb_vol *vol_info; char username[MAX_USERNAME_SIZE + 1]; vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL); if (vol_info == NULL) { tcon = ERR_PTR(-ENOMEM); goto out; } snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", fsuid); vol_info->username = username; vol_info->local_nls = cifs_sb->local_nls; vol_info->linux_uid = fsuid; vol_info->cred_uid = fsuid; vol_info->UNC = master_tcon->treeName; vol_info->retry = master_tcon->retry; vol_info->nocase = master_tcon->nocase; vol_info->local_lease = master_tcon->local_lease; vol_info->no_linux_ext = !master_tcon->unix_ext; /* FIXME: allow for other secFlg 
settings */ vol_info->secFlg = CIFSSEC_MUST_KRB5; /* get a reference for the same TCP session */ spin_lock(&cifs_tcp_ses_lock); ++master_tcon->ses->server->srv_count; spin_unlock(&cifs_tcp_ses_lock); ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); if (IS_ERR(ses)) { tcon = (struct cifsTconInfo *)ses; cifs_put_tcp_session(master_tcon->ses->server); goto out; } tcon = cifs_get_tcon(ses, vol_info); if (IS_ERR(tcon)) { cifs_put_smb_ses(ses); goto out; } if (ses->capabilities & CAP_UNIX) reset_cifs_unix_caps(0, tcon, NULL, vol_info); out: kfree(vol_info); return tcon; } static inline struct tcon_link * cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) { return cifs_sb->master_tlink; } struct cifsTconInfo * cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) { return tlink_tcon(cifs_sb_master_tlink(cifs_sb)); } static int cifs_sb_tcon_pending_wait(void *unused) { schedule(); return signal_pending(current) ? -ERESTARTSYS : 0; } /* find and return a tlink with given uid */ static struct tcon_link * tlink_rb_search(struct rb_root *root, uid_t uid) { struct rb_node *node = root->rb_node; struct tcon_link *tlink; while (node) { tlink = rb_entry(node, struct tcon_link, tl_rbnode); if (tlink->tl_uid > uid) node = node->rb_left; else if (tlink->tl_uid < uid) node = node->rb_right; else return tlink; } return NULL; } /* insert a tcon_link into the tree */ static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink) { struct rb_node **new = &(root->rb_node), *parent = NULL; struct tcon_link *tlink; while (*new) { tlink = rb_entry(*new, struct tcon_link, tl_rbnode); parent = *new; if (tlink->tl_uid > new_tlink->tl_uid) new = &((*new)->rb_left); else new = &((*new)->rb_right); } rb_link_node(&new_tlink->tl_rbnode, parent, new); rb_insert_color(&new_tlink->tl_rbnode, root); } /* * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the * current task. 
* * If the superblock doesn't refer to a multiuser mount, then just return * the master tcon for the mount. * * First, search the rbtree for an existing tcon for this fsuid. If one * exists, then check to see if it's pending construction. If it is then wait * for construction to complete. Once it's no longer pending, check to see if * it failed and either return an error or retry construction, depending on * the timeout. * * If one doesn't exist then insert a new tcon_link struct into the tree and * try to construct a new one. */ struct tcon_link * cifs_sb_tlink(struct cifs_sb_info *cifs_sb) { int ret; uid_t fsuid = current_fsuid(); struct tcon_link *tlink, *newtlink; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); spin_lock(&cifs_sb->tlink_tree_lock); tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); if (tlink) cifs_get_tlink(tlink); spin_unlock(&cifs_sb->tlink_tree_lock); if (tlink == NULL) { newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL); if (newtlink == NULL) return ERR_PTR(-ENOMEM); newtlink->tl_uid = fsuid; newtlink->tl_tcon = ERR_PTR(-EACCES); set_bit(TCON_LINK_PENDING, &newtlink->tl_flags); set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags); cifs_get_tlink(newtlink); spin_lock(&cifs_sb->tlink_tree_lock); /* was one inserted after previous search? 
*/ tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid); if (tlink) { cifs_get_tlink(tlink); spin_unlock(&cifs_sb->tlink_tree_lock); kfree(newtlink); goto wait_for_construction; } tlink = newtlink; tlink_rb_insert(&cifs_sb->tlink_tree, tlink); spin_unlock(&cifs_sb->tlink_tree_lock); } else { wait_for_construction: ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING, cifs_sb_tcon_pending_wait, TASK_INTERRUPTIBLE); if (ret) { cifs_put_tlink(tlink); return ERR_PTR(ret); } /* if it's good, return it */ if (!IS_ERR(tlink->tl_tcon)) return tlink; /* return error if we tried this already recently */ if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) { cifs_put_tlink(tlink); return ERR_PTR(-EACCES); } if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags)) goto wait_for_construction; } tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid); clear_bit(TCON_LINK_PENDING, &tlink->tl_flags); wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING); if (IS_ERR(tlink->tl_tcon)) { cifs_put_tlink(tlink); return ERR_PTR(-EACCES); } return tlink; } /* * periodic workqueue job that scans tcon_tree for a superblock and closes * out tcons. */ static void cifs_prune_tlinks(struct work_struct *work) { struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info, prune_tlinks.work); struct rb_root *root = &cifs_sb->tlink_tree; struct rb_node *node = rb_first(root); struct rb_node *tmp; struct tcon_link *tlink; /* * Because we drop the spinlock in the loop in order to put the tlink * it's not guarded against removal of links from the tree. The only * places that remove entries from the tree are this function and * umounts. Because this function is non-reentrant and is canceled * before umount can proceed, this is safe. 
 */
	spin_lock(&cifs_sb->tlink_tree_lock);
	/* re-read the first node under the lock; the value computed at the
	 * top of the function (before the lock was taken) may be stale */
	node = rb_first(root);
	while (node != NULL) {
		/* grab the successor BEFORE possibly erasing the current node */
		tmp = node;
		node = rb_next(tmp);
		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);

		/* keep the master tlink, any tlink still referenced, and any
		 * tlink that has been used within the idle-expire window */
		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
		    atomic_read(&tlink->tl_count) != 0 ||
		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
			continue;

		/* take a temporary reference so the tlink stays valid while we
		 * drop the spinlock to do the (possibly sleeping) final put */
		cifs_get_tlink(tlink);
		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
		rb_erase(tmp, root);

		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	/* reschedule ourselves; this job runs periodically for the life of
	 * the superblock and is cancelled in cifs_umount() */
	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
				TLINK_IDLE_EXPIRE);
}
./CrossVul/dataset_final_sorted/CWE-20/c/good_3509_0
crossvul-cpp_data_bad_3244_1
/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2017 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Wez Furlong <wez@thebrainroom.com> | +----------------------------------------------------------------------+ */ /* $Id$ */ #include "php.h" #include "ext/standard/file.h" #include "streams/php_streams_int.h" #include "php_network.h" #if defined(PHP_WIN32) || defined(__riscos__) || defined(NETWARE) # undef AF_UNIX #endif #if defined(AF_UNIX) #include <sys/un.h> #endif #ifndef MSG_DONTWAIT # define MSG_DONTWAIT 0 #endif #ifndef MSG_PEEK # define MSG_PEEK 0 #endif #ifdef PHP_WIN32 /* send/recv family on windows expects int */ # define XP_SOCK_BUF_SIZE(sz) (((sz) > INT_MAX) ? 
INT_MAX : (int)(sz)) #else # define XP_SOCK_BUF_SIZE(sz) (sz) #endif php_stream_ops php_stream_generic_socket_ops; PHPAPI php_stream_ops php_stream_socket_ops; php_stream_ops php_stream_udp_socket_ops; #ifdef AF_UNIX php_stream_ops php_stream_unix_socket_ops; php_stream_ops php_stream_unixdg_socket_ops; #endif static int php_tcp_sockop_set_option(php_stream *stream, int option, int value, void *ptrparam); /* {{{ Generic socket stream operations */ static size_t php_sockop_write(php_stream *stream, const char *buf, size_t count) { php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; int didwrite; struct timeval *ptimeout; if (!sock || sock->socket == -1) { return 0; } if (sock->timeout.tv_sec == -1) ptimeout = NULL; else ptimeout = &sock->timeout; retry: didwrite = send(sock->socket, buf, XP_SOCK_BUF_SIZE(count), (sock->is_blocked && ptimeout) ? MSG_DONTWAIT : 0); if (didwrite <= 0) { int err = php_socket_errno(); char *estr; if (sock->is_blocked && (err == EWOULDBLOCK || err == EAGAIN)) { int retval; sock->timeout_event = 0; do { retval = php_pollfd_for(sock->socket, POLLOUT, ptimeout); if (retval == 0) { sock->timeout_event = 1; break; } if (retval > 0) { /* writable now; retry */ goto retry; } err = php_socket_errno(); } while (err == EINTR); } estr = php_socket_strerror(err, NULL, 0); php_error_docref(NULL, E_NOTICE, "send of " ZEND_LONG_FMT " bytes failed with errno=%ld %s", (zend_long)count, err, estr); efree(estr); } if (didwrite > 0) { php_stream_notify_progress_increment(PHP_STREAM_CONTEXT(stream), didwrite, 0); } if (didwrite < 0) { didwrite = 0; } return didwrite; } static void php_sock_stream_wait_for_data(php_stream *stream, php_netstream_data_t *sock) { int retval; struct timeval *ptimeout; if (!sock || sock->socket == -1) { return; } sock->timeout_event = 0; if (sock->timeout.tv_sec == -1) ptimeout = NULL; else ptimeout = &sock->timeout; while(1) { retval = php_pollfd_for(sock->socket, PHP_POLLREADABLE, ptimeout); if (retval == 0) 
sock->timeout_event = 1; if (retval >= 0) break; if (php_socket_errno() != EINTR) break; } } static size_t php_sockop_read(php_stream *stream, char *buf, size_t count) { php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; ssize_t nr_bytes = 0; int err; if (!sock || sock->socket == -1) { return 0; } if (sock->is_blocked) { php_sock_stream_wait_for_data(stream, sock); if (sock->timeout_event) return 0; } nr_bytes = recv(sock->socket, buf, XP_SOCK_BUF_SIZE(count), (sock->is_blocked && sock->timeout.tv_sec != -1) ? MSG_DONTWAIT : 0); err = php_socket_errno(); stream->eof = (nr_bytes == 0 || (nr_bytes == -1 && err != EWOULDBLOCK && err != EAGAIN)); if (nr_bytes > 0) { php_stream_notify_progress_increment(PHP_STREAM_CONTEXT(stream), nr_bytes, 0); } if (nr_bytes < 0) { nr_bytes = 0; } return nr_bytes; } static int php_sockop_close(php_stream *stream, int close_handle) { php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; #ifdef PHP_WIN32 int n; #endif if (!sock) { return 0; } if (close_handle) { #ifdef PHP_WIN32 if (sock->socket == -1) sock->socket = SOCK_ERR; #endif if (sock->socket != SOCK_ERR) { #ifdef PHP_WIN32 /* prevent more data from coming in */ shutdown(sock->socket, SHUT_RD); /* try to make sure that the OS sends all data before we close the connection. * Essentially, we are waiting for the socket to become writeable, which means * that all pending data has been sent. * We use a small timeout which should encourage the OS to send the data, * but at the same time avoid hanging indefinitely. 
* */ do { n = php_pollfd_for_ms(sock->socket, POLLOUT, 500); } while (n == -1 && php_socket_errno() == EINTR); #endif closesocket(sock->socket); sock->socket = SOCK_ERR; } } pefree(sock, php_stream_is_persistent(stream)); return 0; } static int php_sockop_flush(php_stream *stream) { #if 0 php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; return fsync(sock->socket); #endif return 0; } static int php_sockop_stat(php_stream *stream, php_stream_statbuf *ssb) { #if ZEND_WIN32 return 0; #else php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; return zend_fstat(sock->socket, &ssb->sb); #endif } static inline int sock_sendto(php_netstream_data_t *sock, const char *buf, size_t buflen, int flags, struct sockaddr *addr, socklen_t addrlen ) { int ret; if (addr) { ret = sendto(sock->socket, buf, XP_SOCK_BUF_SIZE(buflen), flags, addr, XP_SOCK_BUF_SIZE(addrlen)); return (ret == SOCK_CONN_ERR) ? -1 : ret; } #ifdef PHP_WIN32 return ((ret = send(sock->socket, buf, buflen > INT_MAX ? INT_MAX : (int)buflen, flags)) == SOCK_CONN_ERR) ? -1 : ret; #else return ((ret = send(sock->socket, buf, buflen, flags)) == SOCK_CONN_ERR) ? -1 : ret; #endif } static inline int sock_recvfrom(php_netstream_data_t *sock, char *buf, size_t buflen, int flags, zend_string **textaddr, struct sockaddr **addr, socklen_t *addrlen ) { int ret; int want_addr = textaddr || addr; if (want_addr) { php_sockaddr_storage sa; socklen_t sl = sizeof(sa); ret = recvfrom(sock->socket, buf, XP_SOCK_BUF_SIZE(buflen), flags, (struct sockaddr*)&sa, &sl); ret = (ret == SOCK_CONN_ERR) ? -1 : ret; if (sl) { php_network_populate_name_from_sockaddr((struct sockaddr*)&sa, sl, textaddr, addr, addrlen); } else { if (textaddr) { *textaddr = ZSTR_EMPTY_ALLOC(); } if (addr) { *addr = NULL; *addrlen = 0; } } } else { ret = recv(sock->socket, buf, XP_SOCK_BUF_SIZE(buflen), flags); ret = (ret == SOCK_CONN_ERR) ? 
-1 : ret; } return ret; } static int php_sockop_set_option(php_stream *stream, int option, int value, void *ptrparam) { int oldmode, flags; php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; php_stream_xport_param *xparam; if (!sock) { return PHP_STREAM_OPTION_RETURN_NOTIMPL; } switch(option) { case PHP_STREAM_OPTION_CHECK_LIVENESS: { struct timeval tv; char buf; int alive = 1; if (value == -1) { if (sock->timeout.tv_sec == -1) { tv.tv_sec = FG(default_socket_timeout); tv.tv_usec = 0; } else { tv = sock->timeout; } } else { tv.tv_sec = value; tv.tv_usec = 0; } if (sock->socket == -1) { alive = 0; } else if (php_pollfd_for(sock->socket, PHP_POLLREADABLE|POLLPRI, &tv) > 0) { #ifdef PHP_WIN32 int ret; #else ssize_t ret; #endif int err; ret = recv(sock->socket, &buf, sizeof(buf), MSG_PEEK); err = php_socket_errno(); if (0 == ret || /* the counterpart did properly shutdown*/ (0 > ret && err != EWOULDBLOCK && err != EAGAIN && err != EMSGSIZE)) { /* there was an unrecoverable error */ alive = 0; } } return alive ? PHP_STREAM_OPTION_RETURN_OK : PHP_STREAM_OPTION_RETURN_ERR; } case PHP_STREAM_OPTION_BLOCKING: oldmode = sock->is_blocked; if (SUCCESS == php_set_sock_blocking(sock->socket, value)) { sock->is_blocked = value; return oldmode; } return PHP_STREAM_OPTION_RETURN_ERR; case PHP_STREAM_OPTION_READ_TIMEOUT: sock->timeout = *(struct timeval*)ptrparam; sock->timeout_event = 0; return PHP_STREAM_OPTION_RETURN_OK; case PHP_STREAM_OPTION_META_DATA_API: add_assoc_bool((zval *)ptrparam, "timed_out", sock->timeout_event); add_assoc_bool((zval *)ptrparam, "blocked", sock->is_blocked); add_assoc_bool((zval *)ptrparam, "eof", stream->eof); return PHP_STREAM_OPTION_RETURN_OK; case PHP_STREAM_OPTION_XPORT_API: xparam = (php_stream_xport_param *)ptrparam; switch (xparam->op) { case STREAM_XPORT_OP_LISTEN: xparam->outputs.returncode = (listen(sock->socket, xparam->inputs.backlog) == 0) ? 
0: -1; return PHP_STREAM_OPTION_RETURN_OK; case STREAM_XPORT_OP_GET_NAME: xparam->outputs.returncode = php_network_get_sock_name(sock->socket, xparam->want_textaddr ? &xparam->outputs.textaddr : NULL, xparam->want_addr ? &xparam->outputs.addr : NULL, xparam->want_addr ? &xparam->outputs.addrlen : NULL ); return PHP_STREAM_OPTION_RETURN_OK; case STREAM_XPORT_OP_GET_PEER_NAME: xparam->outputs.returncode = php_network_get_peer_name(sock->socket, xparam->want_textaddr ? &xparam->outputs.textaddr : NULL, xparam->want_addr ? &xparam->outputs.addr : NULL, xparam->want_addr ? &xparam->outputs.addrlen : NULL ); return PHP_STREAM_OPTION_RETURN_OK; case STREAM_XPORT_OP_SEND: flags = 0; if ((xparam->inputs.flags & STREAM_OOB) == STREAM_OOB) { flags |= MSG_OOB; } xparam->outputs.returncode = sock_sendto(sock, xparam->inputs.buf, xparam->inputs.buflen, flags, xparam->inputs.addr, xparam->inputs.addrlen); if (xparam->outputs.returncode == -1) { char *err = php_socket_strerror(php_socket_errno(), NULL, 0); php_error_docref(NULL, E_WARNING, "%s\n", err); efree(err); } return PHP_STREAM_OPTION_RETURN_OK; case STREAM_XPORT_OP_RECV: flags = 0; if ((xparam->inputs.flags & STREAM_OOB) == STREAM_OOB) { flags |= MSG_OOB; } if ((xparam->inputs.flags & STREAM_PEEK) == STREAM_PEEK) { flags |= MSG_PEEK; } xparam->outputs.returncode = sock_recvfrom(sock, xparam->inputs.buf, xparam->inputs.buflen, flags, xparam->want_textaddr ? &xparam->outputs.textaddr : NULL, xparam->want_addr ? &xparam->outputs.addr : NULL, xparam->want_addr ? 
&xparam->outputs.addrlen : NULL ); return PHP_STREAM_OPTION_RETURN_OK; #ifdef HAVE_SHUTDOWN # ifndef SHUT_RD # define SHUT_RD 0 # endif # ifndef SHUT_WR # define SHUT_WR 1 # endif # ifndef SHUT_RDWR # define SHUT_RDWR 2 # endif case STREAM_XPORT_OP_SHUTDOWN: { static const int shutdown_how[] = {SHUT_RD, SHUT_WR, SHUT_RDWR}; xparam->outputs.returncode = shutdown(sock->socket, shutdown_how[xparam->how]); return PHP_STREAM_OPTION_RETURN_OK; } #endif default: return PHP_STREAM_OPTION_RETURN_NOTIMPL; } default: return PHP_STREAM_OPTION_RETURN_NOTIMPL; } } static int php_sockop_cast(php_stream *stream, int castas, void **ret) { php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; if (!sock) { return FAILURE; } switch(castas) { case PHP_STREAM_AS_STDIO: if (ret) { *(FILE**)ret = fdopen(sock->socket, stream->mode); if (*ret) return SUCCESS; return FAILURE; } return SUCCESS; case PHP_STREAM_AS_FD_FOR_SELECT: case PHP_STREAM_AS_FD: case PHP_STREAM_AS_SOCKETD: if (ret) *(php_socket_t *)ret = sock->socket; return SUCCESS; default: return FAILURE; } } /* }}} */ /* These may look identical, but we need them this way so that * we can determine which type of socket we are dealing with * by inspecting stream->ops. * A "useful" side-effect is that the user's scripts can then * make similar decisions using stream_get_meta_data. 
 * */
/* Generic (non-transport-specific) socket: plain option handler. */
php_stream_ops php_stream_generic_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"generic_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_sockop_set_option,
};

/* TCP socket: option handler adds bind/connect/accept xport ops. */
php_stream_ops php_stream_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"tcp_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_tcp_sockop_set_option,
};

/* UDP (datagram) socket. */
php_stream_ops php_stream_udp_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"udp_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_tcp_sockop_set_option,
};

#ifdef AF_UNIX
/* AF_UNIX stream socket. */
php_stream_ops php_stream_unix_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"unix_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_tcp_sockop_set_option,
};

/* AF_UNIX datagram socket. */
php_stream_ops php_stream_unixdg_socket_ops = {
	php_sockop_write, php_sockop_read,
	php_sockop_close, php_sockop_flush,
	"udg_socket",
	NULL, /* seek */
	php_sockop_cast,
	php_sockop_stat,
	php_tcp_sockop_set_option,
};
#endif

/* network socket operations */

#ifdef AF_UNIX
/* Fill *unix_addr from xparam->inputs.name/namelen.  The name is copied
 * with memcpy (not strcpy) so embedded NUL bytes survive; over-long
 * names are truncated to sizeof(sun_path)-1 with an E_NOTICE. */
static inline int parse_unix_address(php_stream_xport_param *xparam, struct sockaddr_un *unix_addr)
{
	memset(unix_addr, 0, sizeof(*unix_addr));
	unix_addr->sun_family = AF_UNIX;

	/* we need to be binary safe on systems that support an abstract
	 * namespace */
	if (xparam->inputs.namelen >= sizeof(unix_addr->sun_path)) {
		/* On linux, when the path begins with a NUL byte we are
		 * referring to an abstract namespace.  In theory we should
		 * allow an extra byte below, since we don't need the NULL.
		 * BUT, to get into this branch of code, the name is too long,
		 * so we don't care.
*/ xparam->inputs.namelen = sizeof(unix_addr->sun_path) - 1; php_error_docref(NULL, E_NOTICE, "socket path exceeded the maximum allowed length of %lu bytes " "and was truncated", (unsigned long)sizeof(unix_addr->sun_path)); } memcpy(unix_addr->sun_path, xparam->inputs.name, xparam->inputs.namelen); return 1; } #endif static inline char *parse_ip_address_ex(const char *str, size_t str_len, int *portno, int get_err, zend_string **err) { char *colon; char *host = NULL; #ifdef HAVE_IPV6 char *p; if (*(str) == '[' && str_len > 1) { /* IPV6 notation to specify raw address with port (i.e. [fe80::1]:80) */ p = memchr(str + 1, ']', str_len - 2); if (!p || *(p + 1) != ':') { if (get_err) { *err = strpprintf(0, "Failed to parse IPv6 address \"%s\"", str); } return NULL; } *portno = atoi(p + 2); return estrndup(str + 1, p - str - 1); } #endif if (str_len) { colon = memchr(str, ':', str_len - 1); } else { colon = NULL; } if (colon) { *portno = atoi(colon + 1); host = estrndup(str, colon - str); } else { if (get_err) { *err = strpprintf(0, "Failed to parse address \"%s\"", str); } return NULL; } return host; } static inline char *parse_ip_address(php_stream_xport_param *xparam, int *portno) { return parse_ip_address_ex(xparam->inputs.name, xparam->inputs.namelen, portno, xparam->want_errortext, &xparam->outputs.error_text); } static inline int php_tcp_sockop_bind(php_stream *stream, php_netstream_data_t *sock, php_stream_xport_param *xparam) { char *host = NULL; int portno, err; long sockopts = STREAM_SOCKOP_NONE; zval *tmpzval = NULL; #ifdef AF_UNIX if (stream->ops == &php_stream_unix_socket_ops || stream->ops == &php_stream_unixdg_socket_ops) { struct sockaddr_un unix_addr; sock->socket = socket(PF_UNIX, stream->ops == &php_stream_unix_socket_ops ? SOCK_STREAM : SOCK_DGRAM, 0); if (sock->socket == SOCK_ERR) { if (xparam->want_errortext) { xparam->outputs.error_text = strpprintf(0, "Failed to create unix%s socket %s", stream->ops == &php_stream_unix_socket_ops ? 
"" : "datagram", strerror(errno)); } return -1; } parse_unix_address(xparam, &unix_addr); return bind(sock->socket, (const struct sockaddr *)&unix_addr, (socklen_t) XtOffsetOf(struct sockaddr_un, sun_path) + xparam->inputs.namelen); } #endif host = parse_ip_address(xparam, &portno); if (host == NULL) { return -1; } #ifdef IPV6_V6ONLY if (PHP_STREAM_CONTEXT(stream) && (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "ipv6_v6only")) != NULL && Z_TYPE_P(tmpzval) != IS_NULL ) { sockopts |= STREAM_SOCKOP_IPV6_V6ONLY; sockopts |= STREAM_SOCKOP_IPV6_V6ONLY_ENABLED * zend_is_true(tmpzval); } #endif #ifdef SO_REUSEPORT if (PHP_STREAM_CONTEXT(stream) && (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "so_reuseport")) != NULL && zend_is_true(tmpzval) ) { sockopts |= STREAM_SOCKOP_SO_REUSEPORT; } #endif #ifdef SO_BROADCAST if (stream->ops == &php_stream_udp_socket_ops /* SO_BROADCAST is only applicable for UDP */ && PHP_STREAM_CONTEXT(stream) && (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "so_broadcast")) != NULL && zend_is_true(tmpzval) ) { sockopts |= STREAM_SOCKOP_SO_BROADCAST; } #endif sock->socket = php_network_bind_socket_to_local_addr(host, portno, stream->ops == &php_stream_udp_socket_ops ? SOCK_DGRAM : SOCK_STREAM, sockopts, xparam->want_errortext ? &xparam->outputs.error_text : NULL, &err ); if (host) { efree(host); } return sock->socket == -1 ? -1 : 0; } static inline int php_tcp_sockop_connect(php_stream *stream, php_netstream_data_t *sock, php_stream_xport_param *xparam) { char *host = NULL, *bindto = NULL; int portno, bindport = 0; int err = 0; int ret; zval *tmpzval = NULL; long sockopts = STREAM_SOCKOP_NONE; #ifdef AF_UNIX if (stream->ops == &php_stream_unix_socket_ops || stream->ops == &php_stream_unixdg_socket_ops) { struct sockaddr_un unix_addr; sock->socket = socket(PF_UNIX, stream->ops == &php_stream_unix_socket_ops ? 
SOCK_STREAM : SOCK_DGRAM, 0); if (sock->socket == SOCK_ERR) { if (xparam->want_errortext) { xparam->outputs.error_text = strpprintf(0, "Failed to create unix socket"); } return -1; } parse_unix_address(xparam, &unix_addr); ret = php_network_connect_socket(sock->socket, (const struct sockaddr *)&unix_addr, (socklen_t) XtOffsetOf(struct sockaddr_un, sun_path) + xparam->inputs.namelen, xparam->op == STREAM_XPORT_OP_CONNECT_ASYNC, xparam->inputs.timeout, xparam->want_errortext ? &xparam->outputs.error_text : NULL, &err); xparam->outputs.error_code = err; goto out; } #endif host = parse_ip_address(xparam, &portno); if (host == NULL) { return -1; } if (PHP_STREAM_CONTEXT(stream) && (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "bindto")) != NULL) { if (Z_TYPE_P(tmpzval) != IS_STRING) { if (xparam->want_errortext) { xparam->outputs.error_text = strpprintf(0, "local_addr context option is not a string."); } efree(host); return -1; } bindto = parse_ip_address_ex(Z_STRVAL_P(tmpzval), Z_STRLEN_P(tmpzval), &bindport, xparam->want_errortext, &xparam->outputs.error_text); } #ifdef SO_BROADCAST if (stream->ops == &php_stream_udp_socket_ops /* SO_BROADCAST is only applicable for UDP */ && PHP_STREAM_CONTEXT(stream) && (tmpzval = php_stream_context_get_option(PHP_STREAM_CONTEXT(stream), "socket", "so_broadcast")) != NULL && zend_is_true(tmpzval) ) { sockopts |= STREAM_SOCKOP_SO_BROADCAST; } #endif /* Note: the test here for php_stream_udp_socket_ops is important, because we * want the default to be TCP sockets so that the openssl extension can * re-use this code. */ sock->socket = php_network_connect_socket_to_host(host, portno, stream->ops == &php_stream_udp_socket_ops ? SOCK_DGRAM : SOCK_STREAM, xparam->op == STREAM_XPORT_OP_CONNECT_ASYNC, xparam->inputs.timeout, xparam->want_errortext ? &xparam->outputs.error_text : NULL, &err, bindto, bindport, sockopts ); ret = sock->socket == -1 ? 
-1 : 0; xparam->outputs.error_code = err; if (host) { efree(host); } if (bindto) { efree(bindto); } #ifdef AF_UNIX out: #endif if (ret >= 0 && xparam->op == STREAM_XPORT_OP_CONNECT_ASYNC && err == EINPROGRESS) { /* indicates pending connection */ return 1; } return ret; } static inline int php_tcp_sockop_accept(php_stream *stream, php_netstream_data_t *sock, php_stream_xport_param *xparam STREAMS_DC) { int clisock; xparam->outputs.client = NULL; clisock = php_network_accept_incoming(sock->socket, xparam->want_textaddr ? &xparam->outputs.textaddr : NULL, xparam->want_addr ? &xparam->outputs.addr : NULL, xparam->want_addr ? &xparam->outputs.addrlen : NULL, xparam->inputs.timeout, xparam->want_errortext ? &xparam->outputs.error_text : NULL, &xparam->outputs.error_code ); if (clisock >= 0) { php_netstream_data_t *clisockdata; clisockdata = emalloc(sizeof(*clisockdata)); if (clisockdata == NULL) { close(clisock); /* technically a fatal error */ } else { memcpy(clisockdata, sock, sizeof(*clisockdata)); clisockdata->socket = clisock; xparam->outputs.client = php_stream_alloc_rel(stream->ops, clisockdata, NULL, "r+"); if (xparam->outputs.client) { xparam->outputs.client->ctx = stream->ctx; if (stream->ctx) { GC_REFCOUNT(stream->ctx)++; } } } } return xparam->outputs.client == NULL ? 
-1 : 0; } static int php_tcp_sockop_set_option(php_stream *stream, int option, int value, void *ptrparam) { php_netstream_data_t *sock = (php_netstream_data_t*)stream->abstract; php_stream_xport_param *xparam; switch(option) { case PHP_STREAM_OPTION_XPORT_API: xparam = (php_stream_xport_param *)ptrparam; switch(xparam->op) { case STREAM_XPORT_OP_CONNECT: case STREAM_XPORT_OP_CONNECT_ASYNC: xparam->outputs.returncode = php_tcp_sockop_connect(stream, sock, xparam); return PHP_STREAM_OPTION_RETURN_OK; case STREAM_XPORT_OP_BIND: xparam->outputs.returncode = php_tcp_sockop_bind(stream, sock, xparam); return PHP_STREAM_OPTION_RETURN_OK; case STREAM_XPORT_OP_ACCEPT: xparam->outputs.returncode = php_tcp_sockop_accept(stream, sock, xparam STREAMS_CC); return PHP_STREAM_OPTION_RETURN_OK; default: /* fall through */ ; } } return php_sockop_set_option(stream, option, value, ptrparam); } PHPAPI php_stream *php_stream_generic_socket_factory(const char *proto, size_t protolen, const char *resourcename, size_t resourcenamelen, const char *persistent_id, int options, int flags, struct timeval *timeout, php_stream_context *context STREAMS_DC) { php_stream *stream = NULL; php_netstream_data_t *sock; php_stream_ops *ops; /* which type of socket ? */ if (strncmp(proto, "tcp", protolen) == 0) { ops = &php_stream_socket_ops; } else if (strncmp(proto, "udp", protolen) == 0) { ops = &php_stream_udp_socket_ops; } #ifdef AF_UNIX else if (strncmp(proto, "unix", protolen) == 0) { ops = &php_stream_unix_socket_ops; } else if (strncmp(proto, "udg", protolen) == 0) { ops = &php_stream_unixdg_socket_ops; } #endif else { /* should never happen */ return NULL; } sock = pemalloc(sizeof(php_netstream_data_t), persistent_id ? 
1 : 0); memset(sock, 0, sizeof(php_netstream_data_t)); sock->is_blocked = 1; sock->timeout.tv_sec = FG(default_socket_timeout); sock->timeout.tv_usec = 0; /* we don't know the socket until we have determined if we are binding or * connecting */ sock->socket = -1; stream = php_stream_alloc_rel(ops, sock, persistent_id, "r+"); if (stream == NULL) { pefree(sock, persistent_id ? 1 : 0); return NULL; } if (flags == 0) { return stream; } return stream; } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */
/*
 * --- file boundary ---
 * The PHP streams transport code above belongs to main/streams/xp_socket.c;
 * the code below is ImageMagick's coders/jpeg.c (JPEG reader).
 */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % JJJJJ PPPP EEEEE GGGG % % J P P E G % % J PPPP EEE G GG % % J J P E G G % % JJJ P EEEEE GGG % % % % % % Read/Write JPEG Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % This software is based in part on the work of the Independent JPEG Group. % See ftp://ftp.uu.net/graphics/jpeg/jpegsrc.v6b.tar.gz for copyright and % licensing restrictions. Blob support contributed by Glenn Randers-Pehrson. % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap-private.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/option-private.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/xml-tree.h" #include <setjmp.h> #if defined(MAGICKCORE_JPEG_DELEGATE) #define JPEG_INTERNAL_OPTIONS #if defined(__MINGW32__) || defined(__MINGW64__) # define XMD_H 1 /* Avoid conflicting typedef for INT32 */ #endif #undef HAVE_STDLIB_H #include "jpeglib.h" #include "jerror.h" #endif /* Define declarations. */ #define ICC_MARKER (JPEG_APP0+2) #define ICC_PROFILE "ICC_PROFILE" #define IPTC_MARKER (JPEG_APP0+13) #define XML_MARKER (JPEG_APP0+1) #define MaxBufferExtent 16384 /* Typedef declarations. 
*/ #if defined(MAGICKCORE_JPEG_DELEGATE) typedef struct _DestinationManager { struct jpeg_destination_mgr manager; Image *image; JOCTET *buffer; } DestinationManager; typedef struct _ErrorManager { Image *image; MagickBooleanType finished; StringInfo *profile; jmp_buf error_recovery; } ErrorManager; typedef struct _SourceManager { struct jpeg_source_mgr manager; Image *image; JOCTET *buffer; boolean start_of_blob; } SourceManager; #endif typedef struct _QuantizationTable { char *slot, *description; size_t width, height; double divisor; unsigned int *levels; } QuantizationTable; /* Forward declarations. */ #if defined(MAGICKCORE_JPEG_DELEGATE) static MagickBooleanType WriteJPEGImage(const ImageInfo *,Image *); #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s J P E G % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsJPEG() returns MagickTrue if the image format type, identified by the % magick string, is JPEG. % % The format of the IsJPEG method is: % % MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsJPEG(const unsigned char *magick,const size_t length) { if (length < 3) return(MagickFalse); if (memcmp(magick,"\377\330\377",3) == 0) return(MagickTrue); return(MagickFalse); } #if defined(MAGICKCORE_JPEG_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadJPEGImage() reads a JPEG image file and returns it. It allocates % the memory necessary for the new Image structure and returns a pointer to % the new image. 
%
%  The format of the ReadJPEGImage method is:
%
%      Image *ReadJPEGImage(const ImageInfo *image_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  libjpeg fill_input_buffer callback: refill the source buffer from the
  blob.  On EOF mid-stream, fabricate an EOI marker (libjpeg convention)
  so the decoder terminates instead of hanging; EOF before any data is a
  fatal JERR_INPUT_EMPTY.
*/
static boolean FillInputBuffer(j_decompress_ptr cinfo)
{
  SourceManager
    *source;

  source=(SourceManager *) cinfo->src;
  source->manager.bytes_in_buffer=(size_t) ReadBlob(source->image,
    MaxBufferExtent,source->buffer);
  if (source->manager.bytes_in_buffer == 0)
    {
      if (source->start_of_blob != FALSE)
        ERREXIT(cinfo,JERR_INPUT_EMPTY);
      WARNMS(cinfo,JWRN_JPEG_EOF);
      source->buffer[0]=(JOCTET) 0xff;
      source->buffer[1]=(JOCTET) JPEG_EOI;
      source->manager.bytes_in_buffer=2;
    }
  source->manager.next_input_byte=source->buffer;
  source->start_of_blob=FALSE;
  return(TRUE);
}

/*
  Pull a single byte from the libjpeg source, refilling the buffer as
  needed.  Used by the marker handlers below to consume marker payloads.
*/
static int GetCharacter(j_decompress_ptr jpeg_info)
{
  if (jpeg_info->src->bytes_in_buffer == 0)
    (void) (*jpeg_info->src->fill_input_buffer)(jpeg_info);
  jpeg_info->src->bytes_in_buffer--;
  return((int) GETJOCTET(*jpeg_info->src->next_input_byte++));
}

/* libjpeg init_source callback: mark the source as freshly opened. */
static void InitializeSource(j_decompress_ptr cinfo)
{
  SourceManager
    *source;

  source=(SourceManager *) cinfo->src;
  source->start_of_blob=TRUE;
}

/*
  Return MagickTrue if the image's 8BIM profile starts with the "G3FAX"
  signature, i.e. the stream is an ITU fax image.
*/
static MagickBooleanType IsITUFaxImage(const Image *image)
{
  const StringInfo
    *profile;

  const unsigned char
    *datum;

  profile=GetImageProfile(image,"8bim");
  if (profile == (const StringInfo *) NULL)
    return(MagickFalse);
  if (GetStringInfoLength(profile) < 5)
    return(MagickFalse);
  datum=GetStringInfoDatum(profile);
  if ((datum[0] == 0x47) && (datum[1] == 0x33) && (datum[2] == 0x46) &&
      (datum[3] == 0x41) && (datum[4] == 0x58))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  libjpeg fatal-error handler: record the formatted message on the image
  exception (as a warning once decoding finished, an error otherwise)
  and longjmp back to the setjmp point in Read/WriteJPEGImage.  Never
  returns.
*/
static void JPEGErrorHandler(j_common_ptr jpeg_info)
{
  char
    message[JMSG_LENGTH_MAX];

  ErrorManager
    *error_manager;

  Image
    *image;

  *message='\0';
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  (jpeg_info->err->format_message)(jpeg_info,message);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "[%s] JPEG Trace: \"%s\"",image->filename,message);
  if (error_manager->finished != MagickFalse)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageWarning,(char *) message,"`%s'",image->filename);
  else
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageError,(char *) message,"`%s'",image->filename);
  longjmp(error_manager->error_recovery,1);
}

/*
  libjpeg emit_message handler: negative levels are real warnings (capped
  at JPEGExcessiveWarnings so a hostile stream cannot flood the
  exception list); non-negative levels are trace messages, logged only
  when debugging.
*/
static MagickBooleanType JPEGWarningHandler(j_common_ptr jpeg_info,int level)
{
#define JPEGExcessiveWarnings  1000

  char
    message[JMSG_LENGTH_MAX];

  ErrorManager
    *error_manager;

  Image
    *image;

  *message='\0';
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  if (level < 0)
    {
      /*
        Process warning message.
      */
      (jpeg_info->err->format_message)(jpeg_info,message);
      if (jpeg_info->err->num_warnings++ < JPEGExcessiveWarnings)
        ThrowBinaryException(CorruptImageWarning,(char *) message,
          image->filename);
    }
  else
    if ((image->debug != MagickFalse) &&
        (level >= jpeg_info->err->trace_level))
      {
        /*
          Process trace message.
        */
        (jpeg_info->err->format_message)(jpeg_info,message);
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          "[%s] JPEG Trace: \"%s\"",image->filename,message);
      }
  return(MagickTrue);
}

/*
  COM marker handler: read the comment payload into the image's
  "comment" property.  Returns TRUE to continue decoding, FALSE on
  allocation failure.
*/
static boolean ReadComment(j_decompress_ptr jpeg_info)
{
  ErrorManager
    *error_manager;

  Image
    *image;

  register unsigned char
    *p;

  register ssize_t
    i;

  size_t
    length;

  StringInfo
    *comment;

  /*
    Determine length of comment.
  */
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=GetCharacter(jpeg_info);
  /* the declared length includes the 2 length bytes themselves; guard
     before subtracting to avoid size_t wrap-around */
  if (length <= 2)
    return(TRUE);
  length-=2;
  comment=BlobToStringInfo((const void *) NULL,length);
  if (comment == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  /*
    Read comment.
*/ error_manager->profile=comment; p=GetStringInfoDatum(comment); for (i=0; i < (ssize_t) GetStringInfoLength(comment); i++) *p++=(unsigned char) GetCharacter(jpeg_info); *p='\0'; error_manager->profile=NULL; p=GetStringInfoDatum(comment); (void) SetImageProperty(image,"comment",(const char *) p); comment=DestroyStringInfo(comment); return(TRUE); } static boolean ReadICCProfile(j_decompress_ptr jpeg_info) { char magick[12]; ErrorManager *error_manager; Image *image; MagickBooleanType status; register ssize_t i; register unsigned char *p; size_t length; StringInfo *icc_profile, *profile; /* Read color profile. */ length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8); length+=(size_t) GetCharacter(jpeg_info); length-=2; if (length <= 14) { while (length-- > 0) (void) GetCharacter(jpeg_info); return(TRUE); } for (i=0; i < 12; i++) magick[i]=(char) GetCharacter(jpeg_info); if (LocaleCompare(magick,ICC_PROFILE) != 0) { /* Not a ICC profile, return. */ for (i=0; i < (ssize_t) (length-12); i++) (void) GetCharacter(jpeg_info); return(TRUE); } (void) GetCharacter(jpeg_info); /* id */ (void) GetCharacter(jpeg_info); /* markers */ length-=14; error_manager=(ErrorManager *) jpeg_info->client_data; image=error_manager->image; profile=BlobToStringInfo((const void *) NULL,length); if (profile == (StringInfo *) NULL) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } error_manager->profile=profile; p=GetStringInfoDatum(profile); for (i=(ssize_t) GetStringInfoLength(profile)-1; i >= 0; i--) *p++=(unsigned char) GetCharacter(jpeg_info); error_manager->profile=NULL; icc_profile=(StringInfo *) GetImageProfile(image,"icc"); if (icc_profile != (StringInfo *) NULL) { ConcatenateStringInfo(icc_profile,profile); profile=DestroyStringInfo(profile); } else { status=SetImageProfile(image,"icc",profile); profile=DestroyStringInfo(profile); if (status == MagickFalse) { (void) 
ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(FALSE); } } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Profile: ICC, %.20g bytes",(double) length); return(TRUE); } static boolean ReadIPTCProfile(j_decompress_ptr jpeg_info) { char magick[MaxTextExtent]; ErrorManager *error_manager; Image *image; MagickBooleanType status; register ssize_t i; register unsigned char *p; size_t length; StringInfo *iptc_profile, *profile; /* Determine length of binary data stored here. */ length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8); length+=(size_t) GetCharacter(jpeg_info); length-=2; if (length <= 14) { while (length-- > 0) (void) GetCharacter(jpeg_info); return(TRUE); } /* Validate that this was written as a Photoshop resource format slug. */ for (i=0; i < 10; i++) magick[i]=(char) GetCharacter(jpeg_info); magick[10]='\0'; length-=10; if (length <= 10) return(TRUE); if (LocaleCompare(magick,"Photoshop ") != 0) { /* Not a IPTC profile, return. */ for (i=0; i < (ssize_t) length; i++) (void) GetCharacter(jpeg_info); return(TRUE); } /* Remove the version number. 
*/
  for (i=0; i < 4; i++)
    (void) GetCharacter(jpeg_info);
  /* NOTE(review): this checks the remaining length AFTER consuming the
     4 version bytes but BEFORE subtracting them -- looks intentional
     (bail on slugs too short to carry a payload) but confirm against
     upstream before changing. */
  if (length <= 11)
    return(TRUE);
  length-=4;
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  profile=BlobToStringInfo((const void *) NULL,length);
  if (profile == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  error_manager->profile=profile;
  p=GetStringInfoDatum(profile);
  for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  error_manager->profile=NULL;
  iptc_profile=(StringInfo *) GetImageProfile(image,"8bim");
  if (iptc_profile != (StringInfo *) NULL)
    {
      /* multi-chunk 8BIM data: append to chunks read earlier */
      ConcatenateStringInfo(iptc_profile,profile);
      profile=DestroyStringInfo(profile);
    }
  else
    {
      status=SetImageProfile(image,"8bim",profile);
      profile=DestroyStringInfo(profile);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(&image->exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
          return(FALSE);
        }
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Profile: iptc, %.20g bytes",(double) length);
  return(TRUE);
}

/*
  Generic APPn marker handler: store the payload as profile "APPn",
  except that APP1 payloads starting with "exif" become the "exif"
  profile and those starting with "http:" become the "xmp" profile
  (after stripping the NUL-terminated namespace prefix).  Multiple
  markers with the same name are concatenated, earlier data first.
  Returns TRUE to continue decoding, FALSE on allocation failure.
*/
static boolean ReadProfile(j_decompress_ptr jpeg_info)
{
  char
    name[MaxTextExtent];

  const StringInfo
    *previous_profile;

  ErrorManager
    *error_manager;

  Image
    *image;

  int
    marker;

  MagickBooleanType
    status;

  register ssize_t
    i;

  register unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile;

  /*
    Read generic profile.
  */
  length=(size_t) ((size_t) GetCharacter(jpeg_info) << 8);
  length+=(size_t) GetCharacter(jpeg_info);
  /* guard before subtracting the 2 length bytes: avoids size_t wrap */
  if (length <= 2)
    return(TRUE);
  length-=2;
  marker=jpeg_info->unread_marker-JPEG_APP0;
  (void) FormatLocaleString(name,MaxTextExtent,"APP%d",marker);
  error_manager=(ErrorManager *) jpeg_info->client_data;
  image=error_manager->image;
  profile=BlobToStringInfo((const void *) NULL,length);
  if (profile == (StringInfo *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  error_manager->profile=profile;
  p=GetStringInfoDatum(profile);
  for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
    *p++=(unsigned char) GetCharacter(jpeg_info);
  error_manager->profile=NULL;
  if (marker == 1)
    {
      p=GetStringInfoDatum(profile);
      if ((length > 4) && (LocaleNCompare((char *) p,"exif",4) == 0))
        (void) CopyMagickString(name,"exif",MaxTextExtent);
      if ((length > 5) && (LocaleNCompare((char *) p,"http:",5) == 0))
        {
          ssize_t
            j;

          /*
            Extract namespace from XMP profile.
          */
          p=GetStringInfoDatum(profile);
          for (j=0; j < (ssize_t) GetStringInfoLength(profile); j++)
          {
            if (*p == '\0')
              break;
            p++;
          }
          if (j < (ssize_t) GetStringInfoLength(profile))
            (void) DestroyStringInfo(SplitStringInfo(profile,(size_t) (j+1)));
          (void) CopyMagickString(name,"xmp",MaxTextExtent);
        }
    }
  previous_profile=GetImageProfile(image,name);
  if (previous_profile != (const StringInfo *) NULL)
    {
      size_t
        length;

      /* prepend the previously read chunk(s): grow, shift the new data
         to the back, then copy the old data to the front */
      length=GetStringInfoLength(profile);
      SetStringInfoLength(profile,GetStringInfoLength(profile)+
        GetStringInfoLength(previous_profile));
      (void) memmove(GetStringInfoDatum(profile)+
        GetStringInfoLength(previous_profile),GetStringInfoDatum(profile),
        length);
      (void) memcpy(GetStringInfoDatum(profile),
        GetStringInfoDatum(previous_profile),
        GetStringInfoLength(previous_profile));
    }
  status=SetImageProfile(image,name,profile);
  profile=DestroyStringInfo(profile);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(FALSE);
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      "Profile: %s, %.20g bytes",name,(double) length);
  return(TRUE);
}

/*
  libjpeg skip_input_data callback: advance the source by number_bytes,
  refilling the buffer as many times as needed.
*/
static void SkipInputData(j_decompress_ptr cinfo,long number_bytes)
{
  SourceManager
    *source;

  if (number_bytes <= 0)
    return;
  source=(SourceManager *) cinfo->src;
  while (number_bytes > (long) source->manager.bytes_in_buffer)
  {
    number_bytes-=(long) source->manager.bytes_in_buffer;
    (void) FillInputBuffer(cinfo);
  }
  source->manager.next_input_byte+=number_bytes;
  source->manager.bytes_in_buffer-=number_bytes;
}

/* libjpeg term_source callback: nothing to release (pool-allocated). */
static void TerminateSource(j_decompress_ptr cinfo)
{
  (void) cinfo;
}

/*
  Install the blob-backed source manager on the decompressor; both the
  manager struct and its buffer live in libjpeg's JPOOL_IMAGE pool, so
  jpeg_destroy_decompress() reclaims them.
*/
static void JPEGSourceManager(j_decompress_ptr cinfo,Image *image)
{
  SourceManager
    *source;

  cinfo->src=(struct jpeg_source_mgr *) (*cinfo->mem->alloc_small)
    ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(SourceManager));
  source=(SourceManager *) cinfo->src;
  source->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr)
cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET));
  source=(SourceManager *) cinfo->src;
  source->manager.init_source=InitializeSource;
  source->manager.fill_input_buffer=FillInputBuffer;
  source->manager.skip_input_data=SkipInputData;
  source->manager.resync_to_restart=jpeg_resync_to_restart;
  source->manager.term_source=TerminateSource;
  source->manager.bytes_in_buffer=0;
  source->manager.next_input_byte=NULL;
  source->image=image;
}

/*
  Estimate the encoder quality (1..100) from the decoded quantization
  tables and store it in image->quality.  Two code paths: one when both
  luma and chroma tables are present, one for a single (grayscale)
  table.  The hash[]/sums[] lookup tables map characteristic quantizer
  values / table sums back to the IJG quality scale; a match flagged
  "exact" hits both thresholds, otherwise the estimate is approximate.
  Lossless streams are reported as quality 100.
*/
static void JPEGSetImageQuality(struct jpeg_decompress_struct *jpeg_info,
  Image *image)
{
  image->quality=UndefinedCompressionQuality;
#if defined(D_PROGRESSIVE_SUPPORTED)
  if (image->compression == LosslessJPEGCompression)
    {
      image->quality=100;
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "Quality: 100 (lossless)");
    }
  else
#endif
  {
    ssize_t
      j,
      qvalue,
      sum;

    register ssize_t
      i;

    /*
      Determine the JPEG compression quality from the quantization tables.
    */
    sum=0;
    for (i=0; i < NUM_QUANT_TBLS; i++)
    {
      if (jpeg_info->quant_tbl_ptrs[i] != NULL)
        for (j=0; j < DCTSIZE2; j++)
          sum+=jpeg_info->quant_tbl_ptrs[i]->quantval[j];
    }
    if ((jpeg_info->quant_tbl_ptrs[0] != NULL) &&
        (jpeg_info->quant_tbl_ptrs[1] != NULL))
      {
        ssize_t
          hash[101] =
          {
            1020, 1015,  932,  848,  780,  735,  702,  679,  660,  645,
             632,  623,  613,  607,  600,  594,  589,  585,  581,  571,
             555,  542,  529,  514,  494,  474,  457,  439,  424,  410,
             397,  386,  373,  364,  351,  341,  334,  324,  317,  309,
             299,  294,  287,  279,  274,  267,  262,  257,  251,  247,
             243,  237,  232,  227,  222,  217,  213,  207,  202,  198,
             192,  188,  183,  177,  173,  168,  163,  157,  153,  148,
             143,  139,  132,  128,  125,  119,  115,  108,  104,   99,
              94,   90,   84,   79,   74,   70,   64,   59,   55,   49,
              45,   40,   34,   30,   25,   20,   15,   11,    6,    4,
               0
          },
          sums[101] =
          {
            32640, 32635, 32266, 31495, 30665, 29804, 29146, 28599, 28104,
            27670, 27225, 26725, 26210, 25716, 25240, 24789, 24373, 23946,
            23572, 22846, 21801, 20842, 19949, 19121, 18386, 17651, 16998,
            16349, 15800, 15247, 14783, 14321, 13859, 13535, 13081, 12702,
            12423, 12056, 11779, 11513, 11135, 10955, 10676, 10392, 10208,
             9928,  9747,  9564,  9369,  9193,  9017,  8822,  8639,  8458,
             8270,  8084,  7896,  7710,  7527,  7347,  7156,  6977,  6788,
             6607,  6422,  6236,  6054,  5867,  5684,  5495,  5305,  5128,
             4945,  4751,  4638,  4442,  4248,  4065,  3888,  3698,  3509,
             3326,  3139,  2957,  2775,  2586,  2405,  2216,  2037,  1846,
             1666,  1483,  1297,  1109,   927,   735,   554,   375,   201,
              128,     0
          };

        /* characteristic quantizers: two luma positions plus the first
           and last chroma entries */
        qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
          jpeg_info->quant_tbl_ptrs[0]->quantval[53]+
          jpeg_info->quant_tbl_ptrs[1]->quantval[0]+
          jpeg_info->quant_tbl_ptrs[1]->quantval[DCTSIZE2-1]);
        for (i=0; i < 100; i++)
        {
          if ((qvalue < hash[i]) && (sum < sums[i]))
            continue;
          if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
            image->quality=(size_t) i+1;
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
              (sum <= sums[i]) ? "exact" : "approximate");
          break;
        }
      }
    else
      if (jpeg_info->quant_tbl_ptrs[0] != NULL)
        {
          ssize_t
            hash[101] =
            {
              510, 505, 422, 380, 355, 338, 326, 318, 311, 305,
              300, 297, 293, 291, 288, 286, 284, 283, 281, 280,
              279, 278, 277, 273, 262, 251, 243, 233, 225, 218,
              211, 205, 198, 193, 186, 181, 177, 172, 168, 164,
              158, 156, 152, 148, 145, 142, 139, 136, 133, 131,
              129, 126, 123, 120, 118, 115, 113, 110, 107, 105,
              102, 100,  97,  94,  92,  89,  87,  83,  81,  79,
               76,  74,  70,  68,  66,  63,  61,  57,  55,  52,
               50,  48,  44,  42,  39,  37,  34,  31,  29,  26,
               24,  21,  18,  16,  13,  11,   8,   6,   3,   2,
                0
            },
            sums[101] =
            {
              16320, 16315, 15946, 15277, 14655, 14073, 13623, 13230, 12859,
              12560, 12240, 11861, 11456, 11081, 10714, 10360, 10027,  9679,
               9368,  9056,  8680,  8331,  7995,  7668,  7376,  7084,  6823,
               6562,  6345,  6125,  5939,  5756,  5571,  5421,  5240,  5086,
               4976,  4829,  4719,  4616,  4463,  4393,  4280,  4166,  4092,
               3980,  3909,  3835,  3755,  3688,  3621,  3541,  3467,  3396,
               3323,  3247,  3170,  3096,  3021,  2952,  2874,  2804,  2727,
               2657,  2583,  2509,  2437,  2362,  2290,  2211,  2136,  2068,
               1996,  1915,  1858,  1773,  1692,  1620,  1552,  1477,  1398,
               1326,  1251,  1179,  1109,  1031,   961,   884,   814,   736,
                667,   592,   518,   441,   369,   292,   221,   151,    86,
                 64,     0
            };

          qvalue=(ssize_t) (jpeg_info->quant_tbl_ptrs[0]->quantval[2]+
            jpeg_info->quant_tbl_ptrs[0]->quantval[53]);
          for (i=0; i < 100; i++)
          {
            if ((qvalue < hash[i]) && (sum < sums[i]))
              continue;
            if (((qvalue <= hash[i]) && (sum <= sums[i])) || (i >= 50))
              image->quality=(size_t) i+1;
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                "Quality: %.20g (%s)",(double) i+1,(qvalue <= hash[i]) &&
                (sum <= sums[i]) ? "exact" : "approximate");
            break;
          }
        }
  }
}

/*
  Record the chroma-subsampling factors of the decoded stream in the
  "jpeg:sampling-factor" image property (e.g. "2x2,1x1,1x1"), formatted
  per-component according to the output colorspace.
*/
static void JPEGSetImageSamplingFactor(struct jpeg_decompress_struct *jpeg_info,
  Image *image)
{
  char
    sampling_factor[MaxTextExtent];

  switch (jpeg_info->out_color_space)
  {
    case JCS_CMYK:
    {
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: CMYK");
      (void) FormatLocaleString(sampling_factor,MaxTextExtent,
        "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
        jpeg_info->comp_info[0].v_samp_factor,
        jpeg_info->comp_info[1].h_samp_factor,
        jpeg_info->comp_info[1].v_samp_factor,
        jpeg_info->comp_info[2].h_samp_factor,
        jpeg_info->comp_info[2].v_samp_factor,
        jpeg_info->comp_info[3].h_samp_factor,
        jpeg_info->comp_info[3].v_samp_factor);
      break;
    }
    case JCS_GRAYSCALE:
    {
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        "Colorspace: GRAYSCALE");
      (void) FormatLocaleString(sampling_factor,MaxTextExtent,"%dx%d",
        jpeg_info->comp_info[0].h_samp_factor,
        jpeg_info->comp_info[0].v_samp_factor);
      break;
    }
    case JCS_RGB:
    {
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: RGB");
      (void) FormatLocaleString(sampling_factor,MaxTextExtent,
        "%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
        jpeg_info->comp_info[0].v_samp_factor,
        jpeg_info->comp_info[1].h_samp_factor,
        jpeg_info->comp_info[1].v_samp_factor,
        jpeg_info->comp_info[2].h_samp_factor,
        jpeg_info->comp_info[2].v_samp_factor);
      break;
    }
    default:
    {
      /* assumes at least 4 components for unknown colorspaces --
         NOTE(review): confirm comp_info[3] is always valid here */
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d",
        jpeg_info->out_color_space);
      (void) FormatLocaleString(sampling_factor,MaxTextExtent,
        "%dx%d,%dx%d,%dx%d,%dx%d",jpeg_info->comp_info[0].h_samp_factor,
        jpeg_info->comp_info[0].v_samp_factor,
        jpeg_info->comp_info[1].h_samp_factor,
        jpeg_info->comp_info[1].v_samp_factor,
        jpeg_info->comp_info[2].h_samp_factor,
        jpeg_info->comp_info[2].v_samp_factor,
        jpeg_info->comp_info[3].h_samp_factor,
        jpeg_info->comp_info[3].v_samp_factor);
      break;
    }
  }
  (void) SetImageProperty(image,"jpeg:sampling-factor",sampling_factor);
  (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Sampling Factors: %s",
    sampling_factor);
}

/* ReadJPEGImage(): decode a JPEG blob into a new Image (continues past
   this chunk). */
static Image *ReadJPEGImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    value[MaxTextExtent];

  const char
    *option;

  ErrorManager
    error_manager;

  Image
    *image;

  IndexPacket
    index;

  JSAMPLE
    *volatile jpeg_pixels;

  JSAMPROW
    scanline[1];

  MagickBooleanType
    debug,
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *memory_info;

  register ssize_t
    i;

  struct jpeg_decompress_struct
    jpeg_info;

  struct jpeg_error_mgr
    jpeg_error;

  register JSAMPLE
    *p;

  size_t
    units;

  ssize_t
    y;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  debug=IsEventLogging();
  (void) debug;
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize JPEG parameters.
*/ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; memory_info=(MemoryInfo *) NULL; error_manager.image=image; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_decompress(&jpeg_info); if (error_manager.profile != (StringInfo *) NULL) error_manager.profile=DestroyStringInfo(error_manager.profile); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); InheritException(exception,&image->exception); return(DestroyImage(image)); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_decompress(&jpeg_info); JPEGSourceManager(&jpeg_info,image); jpeg_set_marker_processor(&jpeg_info,JPEG_COM,ReadComment); option=GetImageOption(image_info,"profile:skip"); if (IsOptionMember("ICC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,ICC_MARKER,ReadICCProfile); if (IsOptionMember("IPTC",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,IPTC_MARKER,ReadIPTCProfile); for (i=1; i < 16; i++) if ((i != 2) && (i != 13) && (i != 14)) if (IsOptionMember("APP",option) == MagickFalse) jpeg_set_marker_processor(&jpeg_info,(int) (JPEG_APP0+i),ReadProfile); i=(ssize_t) jpeg_read_header(&jpeg_info,TRUE); if ((image_info->colorspace == YCbCrColorspace) || (image_info->colorspace == Rec601YCbCrColorspace) || (image_info->colorspace == Rec709YCbCrColorspace)) jpeg_info.out_color_space=JCS_YCbCr; /* Set image resolution. 
*/ units=0; if ((jpeg_info.saw_JFIF_marker != 0) && (jpeg_info.X_density != 1) && (jpeg_info.Y_density != 1)) { image->x_resolution=(double) jpeg_info.X_density; image->y_resolution=(double) jpeg_info.Y_density; units=(size_t) jpeg_info.density_unit; } if (units == 1) image->units=PixelsPerInchResolution; if (units == 2) image->units=PixelsPerCentimeterResolution; number_pixels=(MagickSizeType) image->columns*image->rows; option=GetImageOption(image_info,"jpeg:size"); if ((option != (const char *) NULL) && (jpeg_info.out_color_space != JCS_YCbCr)) { double scale_factor; GeometryInfo geometry_info; MagickStatusType flags; /* Scale the image. */ flags=ParseGeometry(option,&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_calc_output_dimensions(&jpeg_info); image->magick_columns=jpeg_info.output_width; image->magick_rows=jpeg_info.output_height; scale_factor=1.0; if (geometry_info.rho != 0.0) scale_factor=jpeg_info.output_width/geometry_info.rho; if ((geometry_info.sigma != 0.0) && (scale_factor > (jpeg_info.output_height/geometry_info.sigma))) scale_factor=jpeg_info.output_height/geometry_info.sigma; jpeg_info.scale_num=1U; jpeg_info.scale_denom=(unsigned int) scale_factor; jpeg_calc_output_dimensions(&jpeg_info); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Scale factor: %.20g",(double) scale_factor); } #if (JPEG_LIB_VERSION >= 61) && defined(D_PROGRESSIVE_SUPPORTED) #if defined(D_LOSSLESS_SUPPORTED) image->interlace=jpeg_info.process == JPROC_PROGRESSIVE ? JPEGInterlace : NoInterlace; image->compression=jpeg_info.process == JPROC_LOSSLESS ? LosslessJPEGCompression : JPEGCompression; if (jpeg_info.data_precision > 8) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "12-bit JPEG not supported. 
Reducing pixel data to 8 bits","`%s'", image->filename); if (jpeg_info.data_precision == 16) jpeg_info.data_precision=12; #else image->interlace=jpeg_info.progressive_mode != 0 ? JPEGInterlace : NoInterlace; image->compression=JPEGCompression; #endif #else image->compression=JPEGCompression; image->interlace=JPEGInterlace; #endif option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) { /* Let the JPEG library quantize for us. */ jpeg_info.quantize_colors=TRUE; jpeg_info.desired_number_of_colors=(int) StringToUnsignedLong(option); } option=GetImageOption(image_info,"jpeg:block-smoothing"); if (option != (const char *) NULL) jpeg_info.do_block_smoothing=IsStringTrue(option) != MagickFalse ? TRUE : FALSE; jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:fancy-upsampling"); if (option != (const char *) NULL) jpeg_info.do_fancy_upsampling=IsStringTrue(option) != MagickFalse ? 
TRUE : FALSE; (void) jpeg_start_decompress(&jpeg_info); image->columns=jpeg_info.output_width; image->rows=jpeg_info.output_height; image->depth=(size_t) jpeg_info.data_precision; switch (jpeg_info.out_color_space) { case JCS_RGB: default: { (void) SetImageColorspace(image,sRGBColorspace); break; } case JCS_GRAYSCALE: { (void) SetImageColorspace(image,GRAYColorspace); break; } case JCS_YCbCr: { (void) SetImageColorspace(image,YCbCrColorspace); break; } case JCS_CMYK: { (void) SetImageColorspace(image,CMYKColorspace); break; } } if (IsITUFaxImage(image) != MagickFalse) { (void) SetImageColorspace(image,LabColorspace); jpeg_info.out_color_space=JCS_YCbCr; } option=GetImageOption(image_info,"jpeg:colors"); if (option != (const char *) NULL) if (AcquireImageColormap(image,StringToUnsignedLong(option)) == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((jpeg_info.output_components == 1) && (jpeg_info.quantize_colors == 0)) { size_t colors; colors=(size_t) GetQuantumRange(image->depth)+1; if (AcquireImageColormap(image,colors) == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } } if (image->debug != MagickFalse) { if (image->interlace != NoInterlace) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: progressive"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: nonprogressive"); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Data precision: %d", (int) jpeg_info.data_precision); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %dx%d", (int) jpeg_info.output_width,(int) jpeg_info.output_height); } JPEGSetImageQuality(&jpeg_info,image); JPEGSetImageSamplingFactor(&jpeg_info,image); (void) FormatLocaleString(value,MaxTextExtent,"%.20g",(double) jpeg_info.out_color_space); (void) SetImageProperty(image,"jpeg:colorspace",value); if (image_info->ping != MagickFalse) { jpeg_destroy_decompress(&jpeg_info); (void) 
CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { jpeg_destroy_decompress(&jpeg_info); InheritException(exception,&image->exception); return(DestroyImageList(image)); } if ((jpeg_info.output_components != 1) && (jpeg_info.output_components != 3) && (jpeg_info.output_components != 4)) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(CorruptImageError,"ImageTypeNotSupported"); } memory_info=AcquireVirtualMemory((size_t) image->columns, jpeg_info.output_components*sizeof(*jpeg_pixels)); if (memory_info == (MemoryInfo *) NULL) { jpeg_destroy_decompress(&jpeg_info); ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info); /* Convert JPEG pixels to pixel packets. */ if (setjmp(error_manager.error_recovery) != 0) { if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); jpeg_destroy_decompress(&jpeg_info); (void) CloseBlob(image); number_pixels=(MagickSizeType) image->columns*image->rows; if (number_pixels != 0) return(GetFirstImageInList(image)); return(DestroyImage(image)); } if (jpeg_info.quantize_colors != 0) { image->colors=(size_t) jpeg_info.actual_number_of_colors; if (jpeg_info.out_color_space == JCS_GRAYSCALE) for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=image->colormap[i].red; image->colormap[i].blue=image->colormap[i].red; image->colormap[i].opacity=OpaqueOpacity; } else for (i=0; i < (ssize_t) image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum(jpeg_info.colormap[0][i]); image->colormap[i].green=ScaleCharToQuantum(jpeg_info.colormap[1][i]); image->colormap[i].blue=ScaleCharToQuantum(jpeg_info.colormap[2][i]); image->colormap[i].opacity=OpaqueOpacity; } } scanline[0]=(JSAMPROW) jpeg_pixels; for (y=0; y < (ssize_t) image->rows; y++) { register 
IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (jpeg_read_scanlines(&jpeg_info,scanline,1) != 1) { (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"SkipToSyncByte","`%s'",image->filename); continue; } p=jpeg_pixels; q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetAuthenticIndexQueue(image); if (jpeg_info.data_precision > 8) { unsigned short scale; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { size_t pixel; pixel=(size_t) (scale*GETJSAMPLE(*p)); index=ConstrainColormapIndex(image,pixel); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelGreen(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlue(q,ScaleShortToQuantum((unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelMagenta(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelYellow(q,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelBlack(indexes+x,QuantumRange-ScaleShortToQuantum( (unsigned short) (scale*GETJSAMPLE(*p++)))); SetPixelOpacity(q,OpaqueOpacity); q++; } } else if (jpeg_info.output_components == 1) for (x=0; x < (ssize_t) image->columns; x++) { index=ConstrainColormapIndex(image,(size_t) GETJSAMPLE(*p)); SetPixelIndex(indexes+x,index); SetPixelRGBO(q,image->colormap+(ssize_t) index); p++; q++; } else if (image->colorspace != 
CMYKColorspace) for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } else for (x=0; x < (ssize_t) image->columns; x++) { SetPixelCyan(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelMagenta(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelYellow(q,QuantumRange-ScaleCharToQuantum((unsigned char) GETJSAMPLE(*p++))); SetPixelBlack(indexes+x,QuantumRange-ScaleCharToQuantum( (unsigned char) GETJSAMPLE(*p++))); SetPixelOpacity(q,OpaqueOpacity); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) { jpeg_abort_decompress(&jpeg_info); break; } } if (status != MagickFalse) { error_manager.finished=MagickTrue; if (setjmp(error_manager.error_recovery) == 0) (void) jpeg_finish_decompress(&jpeg_info); } /* Free jpeg resources. */ jpeg_destroy_decompress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(GetFirstImageInList(image)); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterJPEGImage() adds properties for the JPEG image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterJPEGImage method is: % % size_t RegisterJPEGImage(void) % */ ModuleExport size_t RegisterJPEGImage(void) { char version[MaxTextExtent]; MagickInfo *entry; static const char description[] = "Joint Photographic Experts Group JFIF format"; *version='\0'; #if defined(JPEG_LIB_VERSION) (void) FormatLocaleString(version,MaxTextExtent,"%d",JPEG_LIB_VERSION); #endif entry=SetMagickInfo("JPE"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->magick=(IsImageFormatHandler *) IsJPEG; entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPEG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->magick=(IsImageFormatHandler *) IsJPEG; entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') 
entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("JPS"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); entry=SetMagickInfo("PJPEG"); #if (JPEG_LIB_VERSION < 80) && !defined(LIBJPEG_TURBO_VERSION) entry->thread_support=NoThreadSupport; #endif #if defined(MAGICKCORE_JPEG_DELEGATE) entry->decoder=(DecodeImageHandler *) ReadJPEGImage; entry->encoder=(EncodeImageHandler *) WriteJPEGImage; #endif entry->adjoin=MagickFalse; entry->description=ConstantString(description); if (*version != '\0') entry->version=ConstantString(version); entry->mime_type=ConstantString("image/jpeg"); entry->module=ConstantString("JPEG"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterJPEGImage() removes format registrations made by the % JPEG module from the list of supported formats. 
% % The format of the UnregisterJPEGImage method is: % % UnregisterJPEGImage(void) % */ ModuleExport void UnregisterJPEGImage(void) { (void) UnregisterMagickInfo("PJPG"); (void) UnregisterMagickInfo("JPS"); (void) UnregisterMagickInfo("JPG"); (void) UnregisterMagickInfo("JPG"); (void) UnregisterMagickInfo("JPEG"); (void) UnregisterMagickInfo("JPE"); } #if defined(MAGICKCORE_JPEG_DELEGATE) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e J P E G I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteJPEGImage() writes a JPEG image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the WriteJPEGImage method is: % % MagickBooleanType WriteJPEGImage(const ImageInfo *image_info, % Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o jpeg_image: The image. 
%
%
*/

/*
  DestroyQuantizationTable() releases the memory owned by a QuantizationTable
  (slot name, description string, and level array), then the table structure
  itself.  Always returns NULL, so callers can write
  table=DestroyQuantizationTable(table) to clear their pointer.
*/
static QuantizationTable *DestroyQuantizationTable(QuantizationTable *table)
{
  assert(table != (QuantizationTable *) NULL);
  if (table->slot != (char *) NULL)
    table->slot=DestroyString(table->slot);
  if (table->description != (char *) NULL)
    table->description=DestroyString(table->description);
  if (table->levels != (unsigned int *) NULL)
    table->levels=(unsigned int *) RelinquishMagickMemory(table->levels);
  table=(QuantizationTable *) RelinquishMagickMemory(table);
  return(table);
}

/*
  EmptyOutputBuffer() is the libjpeg destination-manager callback invoked when
  the compressor's output buffer fills: flush the entire buffer to the image
  blob and reset the manager's byte pointer/count.  On a short write,
  ERREXIT() raises a libjpeg error (which longjmp's into the error handler).
*/
static boolean EmptyOutputBuffer(j_compress_ptr cinfo)
{
  DestinationManager
    *destination;

  destination=(DestinationManager *) cinfo->dest;
  destination->manager.free_in_buffer=(size_t) WriteBlob(destination->image,
    MaxBufferExtent,destination->buffer);
  if (destination->manager.free_in_buffer != MaxBufferExtent)
    ERREXIT(cinfo,JERR_FILE_WRITE);
  destination->manager.next_output_byte=destination->buffer;
  return(TRUE);
}

/*
  GetQuantizationTable() loads a custom JPEG quantization table identified by
  its slot name (or alias) from the XML file `filename`.  Returns NULL when
  the file cannot be read, no matching <table> exists, or a required element
  or attribute is missing; on success the caller owns the returned table and
  must free it with DestroyQuantizationTable().
*/
static QuantizationTable *GetQuantizationTable(const char *filename,
  const char *slot,ExceptionInfo *exception)
{
  char
    *p,
    *xml;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  QuantizationTable
    *table;

  size_t
    length;

  ssize_t
    j;

  XMLTreeInfo
    *description,
    *levels,
    *quantization_tables,
    *table_iterator;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading quantization tables \"%s\" ...",filename);
  table=(QuantizationTable *) NULL;
  /* ~0UL: read the XML file with no size limit. */
  xml=FileToString(filename,~0UL,exception);
  if (xml == (char *) NULL)
    return(table);
  quantization_tables=NewXMLTree(xml,exception);
  if (quantization_tables == (XMLTreeInfo *) NULL)
    {
      xml=DestroyString(xml);
      return(table);
    }
  /* Locate the <table> whose "slot" or "alias" attribute matches `slot`. */
  for (table_iterator=GetXMLTreeChild(quantization_tables,"table");
    table_iterator != (XMLTreeInfo *) NULL;
    table_iterator=GetNextXMLTreeTag(table_iterator))
  {
    attribute=GetXMLTreeAttribute(table_iterator,"slot");
    if ((attribute != (char *) NULL) && (LocaleCompare(slot,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(table_iterator,"alias");
    if ((attribute != (char *) NULL) &&
(LocaleCompare(slot,attribute) == 0)) break; } if (table_iterator == (XMLTreeInfo *) NULL) { xml=DestroyString(xml); return(table); } description=GetXMLTreeChild(table_iterator,"description"); if (description == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<description>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } levels=GetXMLTreeChild(table_iterator,"levels"); if (levels == (XMLTreeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingElement","<levels>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } table=(QuantizationTable *) AcquireMagickMemory(sizeof(*table)); if (table == (QuantizationTable *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAcquireQuantizationTable"); table->slot=(char *) NULL; table->description=(char *) NULL; table->levels=(unsigned int *) NULL; attribute=GetXMLTreeAttribute(table_iterator,"slot"); if (attribute != (char *) NULL) table->slot=ConstantString(attribute); content=GetXMLTreeContent(description); if (content != (char *) NULL) table->description=ConstantString(content); attribute=GetXMLTreeAttribute(levels,"width"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels width>, slot \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->width=StringToUnsignedLong(attribute); if (table->width == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels width>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } 
attribute=GetXMLTreeAttribute(levels,"height"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->height=StringToUnsignedLong(attribute); if (table->height == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels height>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } attribute=GetXMLTreeAttribute(levels,"divisor"); if (attribute == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } table->divisor=InterpretLocaleValue(attribute,(char **) NULL); if (table->divisor == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidAttribute","<levels divisor>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } content=GetXMLTreeContent(levels); if (content == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlMissingContent","<levels>, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } length=(size_t) table->width*table->height; if (length < 64) length=64; table->levels=(unsigned int *) AcquireQuantumMemory(length, sizeof(*table->levels)); if (table->levels == (unsigned int *) NULL) ThrowFatalException(ResourceLimitFatalError, 
"UnableToAcquireQuantizationTable"); for (i=0; i < (ssize_t) (table->width*table->height); i++) { table->levels[i]=(unsigned int) (InterpretLocaleValue(content,&p)/ table->divisor+0.5); while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; content=p; } value=InterpretLocaleValue(content,&p); (void) value; if (p != content) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "XmlInvalidContent","<level> too many values, table \"%s\"",slot); quantization_tables=DestroyXMLTree(quantization_tables); table=DestroyQuantizationTable(table); xml=DestroyString(xml); return(table); } for (j=i; j < 64; j++) table->levels[j]=table->levels[j-1]; quantization_tables=DestroyXMLTree(quantization_tables); xml=DestroyString(xml); return(table); } static void InitializeDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; destination->buffer=(JOCTET *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,MaxBufferExtent*sizeof(JOCTET)); destination->manager.next_output_byte=destination->buffer; destination->manager.free_in_buffer=MaxBufferExtent; } static void TerminateDestination(j_compress_ptr cinfo) { DestinationManager *destination; destination=(DestinationManager *) cinfo->dest; if ((MaxBufferExtent-(int) destination->manager.free_in_buffer) > 0) { ssize_t count; count=WriteBlob(destination->image,MaxBufferExtent- destination->manager.free_in_buffer,destination->buffer); if (count != (ssize_t) (MaxBufferExtent-destination->manager.free_in_buffer)) ERREXIT(cinfo,JERR_FILE_WRITE); } } static void WriteProfile(j_compress_ptr jpeg_info,Image *image) { const char *name; const StringInfo *profile; MagickBooleanType iptc; register ssize_t i; size_t length, tag_length; StringInfo *custom_profile; /* Save image profile as a APP marker. 
*/ iptc=MagickFalse; custom_profile=AcquireStringInfo(65535L); ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { register unsigned char *p; profile=GetImageProfile(image,name); p=GetStringInfoDatum(custom_profile); if (LocaleCompare(name,"EXIF") == 0) { length=GetStringInfoLength(profile); if (length > 65533L) { (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderWarning,"ExifProfileSizeExceedsLimit","`%s'", image->filename); length=65533L; } jpeg_write_marker(jpeg_info,XML_MARKER,GetStringInfoDatum(profile), (unsigned int) length); } if (LocaleCompare(name,"ICC") == 0) { register unsigned char *p; tag_length=strlen(ICC_PROFILE); p=GetStringInfoDatum(custom_profile); (void) CopyMagickMemory(p,ICC_PROFILE,tag_length); p[tag_length]='\0'; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65519L) { length=MagickMin(GetStringInfoLength(profile)-i,65519L); p[12]=(unsigned char) ((i/65519L)+1); p[13]=(unsigned char) (GetStringInfoLength(profile)/65519L+1); (void) CopyMagickMemory(p+tag_length+3,GetStringInfoDatum(profile)+i, length); jpeg_write_marker(jpeg_info,ICC_MARKER,GetStringInfoDatum( custom_profile),(unsigned int) (length+tag_length+3)); } } if (((LocaleCompare(name,"IPTC") == 0) || (LocaleCompare(name,"8BIM") == 0)) && (iptc == MagickFalse)) { size_t roundup; iptc=MagickTrue; for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=65500L) { length=MagickMin(GetStringInfoLength(profile)-i,65500L); roundup=(size_t) (length & 0x01); if (LocaleNCompare((char *) GetStringInfoDatum(profile),"8BIM",4) == 0) { (void) memcpy(p,"Photoshop 3.0 ",14); tag_length=14; } else { (void) CopyMagickMemory(p,"Photoshop 3.0 8BIM\04\04\0\0\0\0",24); tag_length=26; p[24]=(unsigned char) (length >> 8); p[25]=(unsigned char) (length & 0xff); } p[13]=0x00; (void) memcpy(p+tag_length,GetStringInfoDatum(profile)+i,length); if (roundup != 0) p[length+tag_length]='\0'; 
jpeg_write_marker(jpeg_info,IPTC_MARKER,GetStringInfoDatum( custom_profile),(unsigned int) (length+tag_length+roundup)); } } if (LocaleCompare(name,"XMP") == 0) { StringInfo *xmp_profile; /* Add namespace to XMP profile. */ xmp_profile=StringToStringInfo("http://ns.adobe.com/xap/1.0/ "); if (xmp_profile != (StringInfo *) NULL) { if (profile != (StringInfo *) NULL) ConcatenateStringInfo(xmp_profile,profile); GetStringInfoDatum(xmp_profile)[28]='\0'; for (i=0; i < (ssize_t) GetStringInfoLength(xmp_profile); i+=65533L) { length=MagickMin(GetStringInfoLength(xmp_profile)-i,65533L); jpeg_write_marker(jpeg_info,XML_MARKER, GetStringInfoDatum(xmp_profile)+i,(unsigned int) length); } xmp_profile=DestroyStringInfo(xmp_profile); } } (void) LogMagickEvent(CoderEvent,GetMagickModule(), "%s profile: %.20g bytes",name,(double) GetStringInfoLength(profile)); name=GetNextImageProfile(image); } custom_profile=DestroyStringInfo(custom_profile); } static void JPEGDestinationManager(j_compress_ptr cinfo,Image * image) { DestinationManager *destination; cinfo->dest=(struct jpeg_destination_mgr *) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo,JPOOL_IMAGE,sizeof(DestinationManager)); destination=(DestinationManager *) cinfo->dest; destination->manager.init_destination=InitializeDestination; destination->manager.empty_output_buffer=EmptyOutputBuffer; destination->manager.term_destination=TerminateDestination; destination->image=image; } static char **SamplingFactorToList(const char *text) { char **textlist; register char *q; register const char *p; register ssize_t i; if (text == (char *) NULL) return((char **) NULL); /* Convert string to an ASCII list. 
*/ textlist=(char **) AcquireQuantumMemory((size_t) MAX_COMPONENTS, sizeof(*textlist)); if (textlist == (char **) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); p=text; for (i=0; i < (ssize_t) MAX_COMPONENTS; i++) { for (q=(char *) p; *q != '\0'; q++) if (*q == ',') break; textlist[i]=(char *) AcquireQuantumMemory((size_t) (q-p)+MaxTextExtent, sizeof(*textlist[i])); if (textlist[i] == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToConvertText"); (void) CopyMagickString(textlist[i],p,(size_t) (q-p+1)); if (*q == '\r') q++; if (*q == '\0') break; p=q+1; } for (i++; i < (ssize_t) MAX_COMPONENTS; i++) textlist[i]=ConstantString("1x1"); return(textlist); } static MagickBooleanType WriteJPEGImage(const ImageInfo *image_info, Image *image) { const char *option, *sampling_factor, *value; ErrorManager error_manager; ExceptionInfo *exception; Image *volatile volatile_image; int colorspace, quality; JSAMPLE *volatile jpeg_pixels; JSAMPROW scanline[1]; MagickBooleanType status; MemoryInfo *memory_info; register JSAMPLE *q; register ssize_t i; ssize_t y; struct jpeg_compress_struct jpeg_info; struct jpeg_error_mgr jpeg_error; unsigned short scale; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=(&image->exception); if ((LocaleCompare(image_info->magick,"JPS") == 0) && (image->next != (Image *) NULL)) image=AppendImages(image,MagickFalse,exception); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); /* Initialize JPEG parameters. 
*/ (void) ResetMagickMemory(&error_manager,0,sizeof(error_manager)); (void) ResetMagickMemory(&jpeg_info,0,sizeof(jpeg_info)); (void) ResetMagickMemory(&jpeg_error,0,sizeof(jpeg_error)); volatile_image=image; jpeg_info.client_data=(void *) volatile_image; jpeg_info.err=jpeg_std_error(&jpeg_error); jpeg_info.err->emit_message=(void (*)(j_common_ptr,int)) JPEGWarningHandler; jpeg_info.err->error_exit=(void (*)(j_common_ptr)) JPEGErrorHandler; error_manager.image=volatile_image; memory_info=(MemoryInfo *) NULL; if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_compress(&jpeg_info); (void) CloseBlob(volatile_image); return(MagickFalse); } jpeg_info.client_data=(void *) &error_manager; jpeg_create_compress(&jpeg_info); JPEGDestinationManager(&jpeg_info,image); if ((image->columns != (unsigned int) image->columns) || (image->rows != (unsigned int) image->rows)) ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit"); jpeg_info.image_width=(unsigned int) image->columns; jpeg_info.image_height=(unsigned int) image->rows; jpeg_info.input_components=3; jpeg_info.data_precision=8; jpeg_info.in_color_space=JCS_RGB; switch (image->colorspace) { case CMYKColorspace: { jpeg_info.input_components=4; jpeg_info.in_color_space=JCS_CMYK; break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { jpeg_info.in_color_space=JCS_YCbCr; break; } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { if (image_info->type == TrueColorType) break; jpeg_info.input_components=1; jpeg_info.in_color_space=JCS_GRAYSCALE; break; } default: { (void) TransformImageColorspace(image,sRGBColorspace); if (image_info->type == TrueColorType) break; if (SetImageGray(image,&image->exception) != MagickFalse) { jpeg_info.input_components=1; jpeg_info.in_color_space=JCS_GRAYSCALE; } break; } } jpeg_set_defaults(&jpeg_info); if (jpeg_info.in_color_space == JCS_CMYK) jpeg_set_colorspace(&jpeg_info,JCS_YCCK); if ((jpeg_info.data_precision != 12) 
&& (image->depth <= 8)) jpeg_info.data_precision=8; else jpeg_info.data_precision=BITS_IN_JSAMPLE; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Image resolution: %.20g,%.20g",image->x_resolution,image->y_resolution); if ((image->x_resolution != 0.0) && (image->y_resolution != 0.0)) { /* Set image resolution. */ jpeg_info.write_JFIF_header=TRUE; jpeg_info.X_density=(UINT16) image->x_resolution; jpeg_info.Y_density=(UINT16) image->y_resolution; /* Set image resolution units. */ if (image->units == PixelsPerInchResolution) jpeg_info.density_unit=(UINT8) 1; if (image->units == PixelsPerCentimeterResolution) jpeg_info.density_unit=(UINT8) 2; } jpeg_info.dct_method=JDCT_FLOAT; option=GetImageOption(image_info,"jpeg:dct-method"); if (option != (const char *) NULL) switch (*option) { case 'D': case 'd': { if (LocaleCompare(option,"default") == 0) jpeg_info.dct_method=JDCT_DEFAULT; break; } case 'F': case 'f': { if (LocaleCompare(option,"fastest") == 0) jpeg_info.dct_method=JDCT_FASTEST; if (LocaleCompare(option,"float") == 0) jpeg_info.dct_method=JDCT_FLOAT; break; } case 'I': case 'i': { if (LocaleCompare(option,"ifast") == 0) jpeg_info.dct_method=JDCT_IFAST; if (LocaleCompare(option,"islow") == 0) jpeg_info.dct_method=JDCT_ISLOW; break; } } option=GetImageOption(image_info,"jpeg:optimize-coding"); if (option != (const char *) NULL) jpeg_info.optimize_coding=IsStringTrue(option) != MagickFalse ? TRUE : FALSE; else { MagickSizeType length; length=(MagickSizeType) jpeg_info.input_components*image->columns* image->rows*sizeof(JSAMPLE); if (length == (MagickSizeType) ((size_t) length)) { /* Perform optimization only if available memory resources permit it. */ status=AcquireMagickResource(MemoryResource,length); RelinquishMagickResource(MemoryResource,length); jpeg_info.optimize_coding=status == MagickFalse ? 
FALSE : TRUE; } } #if (JPEG_LIB_VERSION >= 61) && defined(C_PROGRESSIVE_SUPPORTED) if ((LocaleCompare(image_info->magick,"PJPEG") == 0) || (image_info->interlace != NoInterlace)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: progressive"); jpeg_simple_progression(&jpeg_info); } else if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: non-progressive"); #else if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Interlace: nonprogressive"); #endif quality=92; if ((image_info->compression != LosslessJPEGCompression) && (image->quality <= 100)) { if (image->quality != UndefinedCompressionQuality) quality=(int) image->quality; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: %.20g", (double) image->quality); } else { #if !defined(C_LOSSLESS_SUPPORTED) quality=100; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Quality: 100"); #else if (image->quality < 100) (void) ThrowMagickException(&image->exception,GetMagickModule(), CoderWarning,"LosslessToLossyJPEGConversion","`%s'",image->filename); else { int point_transform, predictor; predictor=image->quality/100; /* range 1-7 */ point_transform=image->quality % 20; /* range 0-15 */ jpeg_simple_lossless(&jpeg_info,predictor,point_transform); if (image->debug != MagickFalse) { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Compression: lossless"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Predictor: %d",predictor); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Point Transform: %d",point_transform); } } #endif } option=GetImageOption(image_info,"jpeg:extent"); if (option != (const char *) NULL) { Image *jpeg_image; ImageInfo *jpeg_info; jpeg_info=CloneImageInfo(image_info); jpeg_info->blob=NULL; jpeg_image=CloneImage(image,0,0,MagickTrue,&image->exception); if (jpeg_image != (Image *) NULL) { 
MagickSizeType extent; size_t maximum, minimum; /* Search for compression quality that does not exceed image extent. */ jpeg_image->quality=0; extent=(MagickSizeType) SiPrefixToDoubleInterval(option,100.0); (void) DeleteImageOption(jpeg_info,"jpeg:extent"); (void) DeleteImageArtifact(jpeg_image,"jpeg:extent"); maximum=image_info->quality; if (maximum < 2) maximum=101; for (minimum=2; minimum < maximum; ) { (void) AcquireUniqueFilename(jpeg_image->filename); jpeg_image->quality=minimum+(maximum-minimum+1)/2; (void) WriteJPEGImage(jpeg_info,jpeg_image); if (GetBlobSize(jpeg_image) <= extent) minimum=jpeg_image->quality+1; else maximum=jpeg_image->quality-1; (void) RelinquishUniqueFileResource(jpeg_image->filename); } quality=(int) minimum-1; jpeg_image=DestroyImage(jpeg_image); } jpeg_info=DestroyImageInfo(jpeg_info); } jpeg_set_quality(&jpeg_info,quality,TRUE); #if (JPEG_LIB_VERSION >= 70) option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) { GeometryInfo geometry_info; int flags; /* Set quality scaling for luminance and chrominance separately. 
*/ flags=ParseGeometry(option,&geometry_info); if (((flags & RhoValue) != 0) && ((flags & SigmaValue) != 0)) { jpeg_info.q_scale_factor[0]=jpeg_quality_scaling((int) (geometry_info.rho+0.5)); jpeg_info.q_scale_factor[1]=jpeg_quality_scaling((int) (geometry_info.sigma+0.5)); jpeg_default_qtables(&jpeg_info,TRUE); } } #endif colorspace=jpeg_info.in_color_space; value=GetImageOption(image_info,"jpeg:colorspace"); if (value == (char *) NULL) value=GetImageProperty(image,"jpeg:colorspace"); if (value != (char *) NULL) colorspace=StringToInteger(value); sampling_factor=(const char *) NULL; if (colorspace == jpeg_info.in_color_space) { value=GetImageOption(image_info,"jpeg:sampling-factor"); if (value == (char *) NULL) value=GetImageProperty(image,"jpeg:sampling-factor"); if (value != (char *) NULL) { sampling_factor=value; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Input sampling-factors=%s",sampling_factor); } } if (image_info->sampling_factor != (char *) NULL) sampling_factor=image_info->sampling_factor; if (sampling_factor == (const char *) NULL) { if (quality >= 90) for (i=0; i < MAX_COMPONENTS; i++) { jpeg_info.comp_info[i].h_samp_factor=1; jpeg_info.comp_info[i].v_samp_factor=1; } } else { char **factors; GeometryInfo geometry_info; MagickStatusType flags; /* Set sampling factor. 
*/ i=0; factors=SamplingFactorToList(sampling_factor); if (factors != (char **) NULL) { for (i=0; i < MAX_COMPONENTS; i++) { if (factors[i] == (char *) NULL) break; flags=ParseGeometry(factors[i],&geometry_info); if ((flags & SigmaValue) == 0) geometry_info.sigma=geometry_info.rho; jpeg_info.comp_info[i].h_samp_factor=(int) geometry_info.rho; jpeg_info.comp_info[i].v_samp_factor=(int) geometry_info.sigma; factors[i]=(char *) RelinquishMagickMemory(factors[i]); } factors=(char **) RelinquishMagickMemory(factors); } for ( ; i < MAX_COMPONENTS; i++) { jpeg_info.comp_info[i].h_samp_factor=1; jpeg_info.comp_info[i].v_samp_factor=1; } } option=GetImageOption(image_info,"jpeg:q-table"); if (option != (const char *) NULL) { QuantizationTable *table; /* Custom quantization tables. */ table=GetQuantizationTable(option,"0",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=0; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=0; jpeg_add_quant_table(&jpeg_info,0,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"1",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=1; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=1; jpeg_add_quant_table(&jpeg_info,1,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"2",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=2; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=2; jpeg_add_quant_table(&jpeg_info,2,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } table=GetQuantizationTable(option,"3",&image->exception); if (table != (QuantizationTable *) NULL) { for (i=3; i < MAX_COMPONENTS; i++) jpeg_info.comp_info[i].quant_tbl_no=3; jpeg_add_quant_table(&jpeg_info,3,table->levels, jpeg_quality_scaling(quality),0); table=DestroyQuantizationTable(table); } } 
jpeg_start_compress(&jpeg_info,TRUE); if (image->debug != MagickFalse) { if (image->storage_class == PseudoClass) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: PseudoClass"); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: DirectClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Depth: %.20g", (double) image->depth); if (image->colors != 0) (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Number of colors: %.20g",(double) image->colors); else (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Number of colors: unspecified"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "JPEG data precision: %d",(int) jpeg_info.data_precision); switch (image->colorspace) { case CMYKColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Storage class: DirectClass"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: CMYK"); break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: YCbCr"); break; } default: break; } switch (image->colorspace) { case CMYKColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: CMYK"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor, jpeg_info.comp_info[3].h_samp_factor, jpeg_info.comp_info[3].v_samp_factor); break; } case GRAYColorspace: case Rec601LumaColorspace: case Rec709LumaColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: GRAY"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d",jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor); break; } case sRGBColorspace: case RGBColorspace: { 
(void) LogMagickEvent(CoderEvent,GetMagickModule(), "Image colorspace is RGB"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor); break; } case YCbCrColorspace: case Rec601YCbCrColorspace: case Rec709YCbCrColorspace: { (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Colorspace: YCbCr"); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor); break; } default: { (void) LogMagickEvent(CoderEvent,GetMagickModule(),"Colorspace: %d", image->colorspace); (void) LogMagickEvent(CoderEvent,GetMagickModule(), "Sampling factors: %dx%d,%dx%d,%dx%d,%dx%d", jpeg_info.comp_info[0].h_samp_factor, jpeg_info.comp_info[0].v_samp_factor, jpeg_info.comp_info[1].h_samp_factor, jpeg_info.comp_info[1].v_samp_factor, jpeg_info.comp_info[2].h_samp_factor, jpeg_info.comp_info[2].v_samp_factor, jpeg_info.comp_info[3].h_samp_factor, jpeg_info.comp_info[3].v_samp_factor); break; } } } /* Write JPEG profiles. */ value=GetImageProperty(image,"comment"); if (value != (char *) NULL) for (i=0; i < (ssize_t) strlen(value); i+=65533L) jpeg_write_marker(&jpeg_info,JPEG_COM,(unsigned char *) value+i, (unsigned int) MagickMin((size_t) strlen(value+i),65533L)); if (image->profiles != (void *) NULL) WriteProfile(&jpeg_info,image); /* Convert MIFF to JPEG raster pixels. 
*/ memory_info=AcquireVirtualMemory((size_t) image->columns, jpeg_info.input_components*sizeof(*jpeg_pixels)); if (memory_info == (MemoryInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); jpeg_pixels=(JSAMPLE *) GetVirtualMemoryBlob(memory_info); if (setjmp(error_manager.error_recovery) != 0) { jpeg_destroy_compress(&jpeg_info); if (memory_info != (MemoryInfo *) NULL) memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(MagickFalse); } scanline[0]=(JSAMPROW) jpeg_pixels; scale=65535/(unsigned short) GetQuantumRange((size_t) jpeg_info.data_precision); if (scale == 0) scale=1; if (jpeg_info.data_precision <= 8) { if ((jpeg_info.in_color_space == JCS_RGB) || (jpeg_info.in_color_space == JCS_YCbCr)) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelRed(p)); *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelGreen(p)); *q++=(JSAMPLE) ScaleQuantumToChar(GetPixelBlue(p)); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else if (jpeg_info.in_color_space == JCS_GRAYSCALE) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) ScaleQuantumToChar(ClampToQuantum( GetPixelLuma(image,p))); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else for (y=0; y < (ssize_t) image->rows; y++) { register const 
IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { /* Convert DirectClass packets to contiguous CMYK scanlines. */ *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelCyan(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelMagenta(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelYellow(p)))); *q++=(JSAMPLE) (ScaleQuantumToChar((Quantum) (QuantumRange- GetPixelBlack(indexes+x)))); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } else if (jpeg_info.in_color_space == JCS_GRAYSCALE) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) (ScaleQuantumToShort(ClampToQuantum( GetPixelLuma(image,p)))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else if ((jpeg_info.in_color_space == JCS_RGB) || (jpeg_info.in_color_space == JCS_YCbCr)) for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelRed(p))/scale); *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelGreen(p))/scale); *q++=(JSAMPLE) (ScaleQuantumToShort(GetPixelBlue(p))/scale); p++; } (void) 
jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } else for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t x; p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; q=jpeg_pixels; indexes=GetVirtualIndexQueue(image); for (x=0; x < (ssize_t) image->columns; x++) { /* Convert DirectClass packets to contiguous CMYK scanlines. */ *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelRed(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelGreen(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange-GetPixelBlue(p))/ scale); *q++=(JSAMPLE) (ScaleQuantumToShort(QuantumRange- GetPixelIndex(indexes+x))/scale); p++; } (void) jpeg_write_scanlines(&jpeg_info,scanline,1); status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } if (y == (ssize_t) image->rows) jpeg_finish_compress(&jpeg_info); /* Relinquish resources. */ jpeg_destroy_compress(&jpeg_info); memory_info=RelinquishVirtualMemory(memory_info); (void) CloseBlob(image); return(MagickTrue); } #endif
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2578_0
crossvul-cpp_data_bad_2024_0
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

/* Module knob: zerocopy TX is opt-in per backend socket (see
 * vhost_sock_zcopy() below); read-only after module load (0444). */
static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
	" 1 -Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN 3
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN 2
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS 1
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN 0

#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	/* hdr is used to store the virtio header.
	 * Since each iovec has >= 1 byte length, we never need more than
	 * header length entries to store the header. */
	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* first used idx for DMA done zerocopy buffers */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

/* Bitmask of virtqueue indices for which zerocopy is enabled. */
static unsigned vhost_net_zcopy_mask __read_mostly;

/* Mark virtqueue index @vq as zerocopy-capable in the global mask. */
static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

/* Allocate the outstanding-ubuf refcount object for @vq.
 * Returns NULL when @zcopy is false (nothing to count),
 * ERR_PTR(-ENOMEM) on allocation failure. */
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	atomic_set(&ubufs->refcount, 1);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

/* Drop one reference; wake waiters when the count hits zero.
 * Returns the post-decrement count. */
static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	int r = atomic_sub_return(1, &ubufs->refcount);
	if (unlikely(!r))
		wake_up(&ubufs->wait);
	return r;
}

/* Drop the caller's reference and block until all outstanding
 * zerocopy DMAs have completed (refcount reaches zero). */
static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put(ubufs);
	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}

/* As above, then free the refcount object itself. */
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

/* Free all per-vq ubuf_info arrays. */
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

/* Allocate ubuf_info arrays for every zerocopy-capable vq.
 * Returns 0 or -ENOMEM (all-or-nothing: partial allocations freed). */
static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
					      UIO_MAXIOV, GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

/* Reset per-vq bookkeeping to the freshly-opened state. */
static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
	}

}

/* Count a transmitted packet; every 1024 packets the error counter
 * window is reset (feeds the zerocopy heuristic below). */
static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

/* Count one zerocopy TX failure in the current window. */
static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

/* Heuristic: use zerocopy only while the recent error rate stays
 * below ~1/64 of packets, and never while a flush is in progress. */
static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

/* True when the backend socket supports zerocopy and the module
 * knob enables it. */
static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;

	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		/* source iovec is advanced in place (consumed) */
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}

/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;

	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		/* unlike move_iovec_hdr(), source is left untouched */
		len -= size;
		++from;
		++to;
		++seg;
	}
}

/* In case of DMA done not in order in lower device driver for some reason.
 * upend_idx is used to track end of used idx, done_idx is used to track head
 * of used idx. Once lower device DMA done contiguously, we will signal KVM
 * guest used idx.
 */
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	/* Scan forward from done_idx for contiguously completed entries;
	 * stop at the first one still in flight. */
	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else
			break;
	}
	/* Flush completions to the used ring, splitting at the ring
	 * wrap-around point. */
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

/* Completion callback invoked by the lower device when a zerocopy
 * skb's DMA finishes; marks the descriptor done/failed and may kick
 * the TX poll thread. */
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* set len to mark this desc buffers done DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

/* Expects to be always run from workqueue - which acts as
 * read-size critical section for our kind of RCU.
*/
/* TX path: pull descriptors from the TX virtqueue and push the
 * payload into the backend socket, optionally via zerocopy. Runs
 * with vq->mutex held for the whole batch; yields after
 * VHOST_NET_WEIGHT bytes to avoid starving the RX vq. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	mutex_lock(&vq->mutex);
	sock = vq->private_data;
	if (!sock)
		goto out;
	vhost_disable_notify(&net->dev, vq);

	hdr_size = nvq->vhost_hlen;
	zcopy = nvq->ubufs;

	for (;;) {
		/* Release DMAs done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		/* If more outstanding DMAs, queue the work.
		 * Handle upend_idx wrap around
		 */
		if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
			      % UIO_MAXIOV == nvq->done_idx))
			break;

		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		/* TX descriptors must be read-only from the host side:
		 * any "in" (writable) segment is a guest bug. */
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, int %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(nvq->hdr, s), hdr_size);
			break;
		}
		/* Zerocopy only pays off for large payloads, and only while
		 * there is a free slot in the pending ring and the error-rate
		 * heuristic allows it. */
		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
				   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
				      nvq->done_idx
				   && vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			struct ubuf_info *ubuf;
			ubuf = nvq->ubuf_info + nvq->upend_idx;

			vq->heads[nvq->upend_idx].id = head;
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->callback = vhost_zerocopy_callback;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			msg.msg_control = ubuf;
			msg.msg_controllen = sizeof(ubuf);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				/* Undo the pending-slot reservation made
				 * above before requeueing the descriptor. */
				vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			/* used entry is reported later, once DMA completes */
			vhost_zerocopy_signal_used(net, vq);
		total_len += len;
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}
out:
	mutex_unlock(&vq->mutex);
}

/* Peek the length of the next packet queued on @sk without dequeueing
 * it; 0 when the receive queue is empty. VLAN-tagged frames account
 * for the tag that will be reinserted on receive. */
static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (vlan_tx_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

/* This is a multi-buffer version of vhost_get_desc, that works if
 * vq has
read descriptors only. * @vq - the relevant virtqueue * @datalen - data length we'll be reading * @iovcount - returned count of io vectors we fill * @log - vhost log * @log_num - log offset * @quota - headcount quota, 1 for big buffer * returns number of buffer heads allocated, negative on error */ static int get_rx_bufs(struct vhost_virtqueue *vq, struct vring_used_elem *heads, int datalen, unsigned *iovcount, struct vhost_log *log, unsigned *log_num, unsigned int quota) { unsigned int out, in; int seg = 0; int headcount = 0; unsigned d; int r, nlogs = 0; while (datalen > 0 && headcount < quota) { if (unlikely(seg >= UIO_MAXIOV)) { r = -ENOBUFS; goto err; } d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, ARRAY_SIZE(vq->iov) - seg, &out, &in, log, log_num); if (d == vq->num) { r = 0; goto err; } if (unlikely(out || in <= 0)) { vq_err(vq, "unexpected descriptor format for RX: " "out %d, in %d\n", out, in); r = -EINVAL; goto err; } if (unlikely(log)) { nlogs += *log_num; log += *log_num; } heads[headcount].id = d; heads[headcount].len = iov_length(vq->iov + seg, in); datalen -= heads[headcount].len; ++headcount; seg += in; } heads[headcount - 1].len += datalen; *iovcount = seg; if (unlikely(log)) *log_num = nlogs; return headcount; err: vhost_discard_vq_desc(vq, headcount); return r; } /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. */ static void handle_rx(struct vhost_net *net) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_virtqueue *vq = &nvq->vq; unsigned uninitialized_var(in), log; struct vhost_log *vq_log; struct msghdr msg = { .msg_name = NULL, .msg_namelen = 0, .msg_control = NULL, /* FIXME: get and handle RX aux data. 
*/ .msg_controllen = 0, .msg_iov = vq->iov, .msg_flags = MSG_DONTWAIT, }; struct virtio_net_hdr_mrg_rxbuf hdr = { .hdr.flags = 0, .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE }; size_t total_len = 0; int err, mergeable; s16 headcount; size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; struct socket *sock; mutex_lock(&vq->mutex); sock = vq->private_data; if (!sock) goto out; vhost_disable_notify(&net->dev, vq); vhost_hlen = nvq->vhost_hlen; sock_hlen = nvq->sock_hlen; vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ? vq->log : NULL; mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF); while ((sock_len = peek_head_len(sock->sk))) { sock_len += sock_hlen; vhost_len = sock_len + vhost_hlen; headcount = get_rx_bufs(vq, vq->heads, vhost_len, &in, vq_log, &log, likely(mergeable) ? UIO_MAXIOV : 1); /* On error, stop handling until the next kick. */ if (unlikely(headcount < 0)) break; /* OK, now we need to know about added descriptors. */ if (!headcount) { if (unlikely(vhost_enable_notify(&net->dev, vq))) { /* They have slipped one in as we were * doing that: check again. */ vhost_disable_notify(&net->dev, vq); continue; } /* Nothing new? Wait for eventfd to tell us * they refilled. */ break; } /* We don't need to be notified again. */ if (unlikely((vhost_hlen))) /* Skip header. TODO: support TSO. */ move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in); else /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF: * needed because recvmsg can modify msg_iov. */ copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in); msg.msg_iovlen = in; err = sock->ops->recvmsg(NULL, sock, &msg, sock_len, MSG_DONTWAIT | MSG_TRUNC); /* Userspace might have consumed the packet meanwhile: * it's not supposed to do this usually, but might be hard * to prevent. Discard data we got (if any) and keep going. 
*/ if (unlikely(err != sock_len)) { pr_debug("Discarded rx packet: " " len %d, expected %zd\n", err, sock_len); vhost_discard_vq_desc(vq, headcount); continue; } if (unlikely(vhost_hlen) && memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0, vhost_hlen)) { vq_err(vq, "Unable to write vnet_hdr at addr %p\n", vq->iov->iov_base); break; } /* TODO: Should check and handle checksum. */ if (likely(mergeable) && memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount, offsetof(typeof(hdr), num_buffers), sizeof hdr.num_buffers)) { vq_err(vq, "Failed num_buffers write"); vhost_discard_vq_desc(vq, headcount); break; } vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, headcount); if (unlikely(vq_log)) vhost_log_write(vq, vq_log, log, vhost_len); total_len += vhost_len; if (unlikely(total_len >= VHOST_NET_WEIGHT)) { vhost_poll_queue(&vq->poll); break; } } out: mutex_unlock(&vq->mutex); } static void handle_tx_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, poll.work); struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); handle_tx(net); } static void handle_rx_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, poll.work); struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); handle_rx(net); } static void handle_tx_net(struct vhost_work *work) { struct vhost_net *net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work); handle_tx(net); } static void handle_rx_net(struct vhost_work *work) { struct vhost_net *net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work); handle_rx(net); } static int vhost_net_open(struct inode *inode, struct file *f) { struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL); struct vhost_dev *dev; struct vhost_virtqueue **vqs; int i; if (!n) return -ENOMEM; vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL); if (!vqs) { kfree(n); return -ENOMEM; } dev = &n->dev; 
vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; for (i = 0; i < VHOST_NET_VQ_MAX; i++) { n->vqs[i].ubufs = NULL; n->vqs[i].ubuf_info = NULL; n->vqs[i].upend_idx = 0; n->vqs[i].done_idx = 0; n->vqs[i].vhost_hlen = 0; n->vqs[i].sock_hlen = 0; } vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev); vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev); f->private_data = n; return 0; } static void vhost_net_disable_vq(struct vhost_net *n, struct vhost_virtqueue *vq) { struct vhost_net_virtqueue *nvq = container_of(vq, struct vhost_net_virtqueue, vq); struct vhost_poll *poll = n->poll + (nvq - n->vqs); if (!vq->private_data) return; vhost_poll_stop(poll); } static int vhost_net_enable_vq(struct vhost_net *n, struct vhost_virtqueue *vq) { struct vhost_net_virtqueue *nvq = container_of(vq, struct vhost_net_virtqueue, vq); struct vhost_poll *poll = n->poll + (nvq - n->vqs); struct socket *sock; sock = vq->private_data; if (!sock) return 0; return vhost_poll_start(poll, sock->file); } static struct socket *vhost_net_stop_vq(struct vhost_net *n, struct vhost_virtqueue *vq) { struct socket *sock; mutex_lock(&vq->mutex); sock = vq->private_data; vhost_net_disable_vq(n, vq); vq->private_data = NULL; mutex_unlock(&vq->mutex); return sock; } static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock, struct socket **rx_sock) { *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); } static void vhost_net_flush_vq(struct vhost_net *n, int index) { vhost_poll_flush(n->poll + index); vhost_poll_flush(&n->vqs[index].vq.poll); } static void vhost_net_flush(struct vhost_net *n) { vhost_net_flush_vq(n, VHOST_NET_VQ_TX); vhost_net_flush_vq(n, VHOST_NET_VQ_RX); 
if (n->vqs[VHOST_NET_VQ_TX].ubufs) { mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n->tx_flush = true; mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); /* Wait for all lower device DMAs done. */ vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n->tx_flush = false; atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); } } static int vhost_net_release(struct inode *inode, struct file *f) { struct vhost_net *n = f->private_data; struct socket *tx_sock; struct socket *rx_sock; vhost_net_stop(n, &tx_sock, &rx_sock); vhost_net_flush(n); vhost_dev_stop(&n->dev); vhost_dev_cleanup(&n->dev, false); vhost_net_vq_reset(n); if (tx_sock) fput(tx_sock->file); if (rx_sock) fput(rx_sock->file); /* Make sure no callbacks are outstanding */ synchronize_rcu_bh(); /* We do an extra flush before freeing memory, * since jobs can re-queue themselves. */ vhost_net_flush(n); kfree(n->dev.vqs); kfree(n); return 0; } static struct socket *get_raw_socket(int fd) { struct { struct sockaddr_ll sa; char buf[MAX_ADDR_LEN]; } uaddr; int uaddr_len = sizeof uaddr, r; struct socket *sock = sockfd_lookup(fd, &r); if (!sock) return ERR_PTR(-ENOTSOCK); /* Parameter checking */ if (sock->sk->sk_type != SOCK_RAW) { r = -ESOCKTNOSUPPORT; goto err; } r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, &uaddr_len, 0); if (r) goto err; if (uaddr.sa.sll_family != AF_PACKET) { r = -EPFNOSUPPORT; goto err; } return sock; err: fput(sock->file); return ERR_PTR(r); } static struct socket *get_tap_socket(int fd) { struct file *file = fget(fd); struct socket *sock; if (!file) return ERR_PTR(-EBADF); sock = tun_get_socket(file); if (!IS_ERR(sock)) return sock; sock = macvtap_get_socket(file); if (IS_ERR(sock)) fput(file); return sock; } static struct socket *get_socket(int fd) { struct socket *sock; /* special case to disable backend */ if (fd == -1) return NULL; sock = get_raw_socket(fd); if 
(!IS_ERR(sock)) return sock; sock = get_tap_socket(fd); if (!IS_ERR(sock)) return sock; return ERR_PTR(-ENOTSOCK); } static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) { struct socket *sock, *oldsock; struct vhost_virtqueue *vq; struct vhost_net_virtqueue *nvq; struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL; int r; mutex_lock(&n->dev.mutex); r = vhost_dev_check_owner(&n->dev); if (r) goto err; if (index >= VHOST_NET_VQ_MAX) { r = -ENOBUFS; goto err; } vq = &n->vqs[index].vq; nvq = &n->vqs[index]; mutex_lock(&vq->mutex); /* Verify that ring has been setup correctly. */ if (!vhost_vq_access_ok(vq)) { r = -EFAULT; goto err_vq; } sock = get_socket(fd); if (IS_ERR(sock)) { r = PTR_ERR(sock); goto err_vq; } /* start polling new socket */ oldsock = vq->private_data; if (sock != oldsock) { ubufs = vhost_net_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock)); if (IS_ERR(ubufs)) { r = PTR_ERR(ubufs); goto err_ubufs; } vhost_net_disable_vq(n, vq); vq->private_data = sock; r = vhost_init_used(vq); if (r) goto err_used; r = vhost_net_enable_vq(n, vq); if (r) goto err_used; oldubufs = nvq->ubufs; nvq->ubufs = ubufs; n->tx_packets = 0; n->tx_zcopy_err = 0; n->tx_flush = false; } mutex_unlock(&vq->mutex); if (oldubufs) { vhost_net_ubuf_put_wait_and_free(oldubufs); mutex_lock(&vq->mutex); vhost_zerocopy_signal_used(n, vq); mutex_unlock(&vq->mutex); } if (oldsock) { vhost_net_flush_vq(n, index); fput(oldsock->file); } mutex_unlock(&n->dev.mutex); return 0; err_used: vq->private_data = oldsock; vhost_net_enable_vq(n, vq); if (ubufs) vhost_net_ubuf_put_wait_and_free(ubufs); err_ubufs: fput(sock->file); err_vq: mutex_unlock(&vq->mutex); err: mutex_unlock(&n->dev.mutex); return r; } static long vhost_net_reset_owner(struct vhost_net *n) { struct socket *tx_sock = NULL; struct socket *rx_sock = NULL; long err; struct vhost_memory *memory; mutex_lock(&n->dev.mutex); err = vhost_dev_check_owner(&n->dev); if (err) goto done; memory = 
vhost_dev_reset_owner_prepare(); if (!memory) { err = -ENOMEM; goto done; } vhost_net_stop(n, &tx_sock, &rx_sock); vhost_net_flush(n); vhost_dev_reset_owner(&n->dev, memory); vhost_net_vq_reset(n); done: mutex_unlock(&n->dev.mutex); if (tx_sock) fput(tx_sock->file); if (rx_sock) fput(rx_sock->file); return err; } static int vhost_net_set_features(struct vhost_net *n, u64 features) { size_t vhost_hlen, sock_hlen, hdr_len; int i; hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr); if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) { /* vhost provides vnet_hdr */ vhost_hlen = hdr_len; sock_hlen = 0; } else { /* socket provides vnet_hdr */ vhost_hlen = 0; sock_hlen = hdr_len; } mutex_lock(&n->dev.mutex); if ((features & (1 << VHOST_F_LOG_ALL)) && !vhost_log_access_ok(&n->dev)) { mutex_unlock(&n->dev.mutex); return -EFAULT; } n->dev.acked_features = features; smp_wmb(); for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { mutex_lock(&n->vqs[i].vq.mutex); n->vqs[i].vhost_hlen = vhost_hlen; n->vqs[i].sock_hlen = sock_hlen; mutex_unlock(&n->vqs[i].vq.mutex); } vhost_net_flush(n); mutex_unlock(&n->dev.mutex); return 0; } static long vhost_net_set_owner(struct vhost_net *n) { int r; mutex_lock(&n->dev.mutex); if (vhost_dev_has_owner(&n->dev)) { r = -EBUSY; goto out; } r = vhost_net_set_ubuf_info(n); if (r) goto out; r = vhost_dev_set_owner(&n->dev); if (r) vhost_net_clear_ubuf_info(n); vhost_net_flush(n); out: mutex_unlock(&n->dev.mutex); return r; } static long vhost_net_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { struct vhost_net *n = f->private_data; void __user *argp = (void __user *)arg; u64 __user *featurep = argp; struct vhost_vring_file backend; u64 features; int r; switch (ioctl) { case VHOST_NET_SET_BACKEND: if (copy_from_user(&backend, argp, sizeof backend)) return -EFAULT; return vhost_net_set_backend(n, backend.index, backend.fd); case VHOST_GET_FEATURES: features = 
VHOST_NET_FEATURES; if (copy_to_user(featurep, &features, sizeof features)) return -EFAULT; return 0; case VHOST_SET_FEATURES: if (copy_from_user(&features, featurep, sizeof features)) return -EFAULT; if (features & ~VHOST_NET_FEATURES) return -EOPNOTSUPP; return vhost_net_set_features(n, features); case VHOST_RESET_OWNER: return vhost_net_reset_owner(n); case VHOST_SET_OWNER: return vhost_net_set_owner(n); default: mutex_lock(&n->dev.mutex); r = vhost_dev_ioctl(&n->dev, ioctl, argp); if (r == -ENOIOCTLCMD) r = vhost_vring_ioctl(&n->dev, ioctl, argp); else vhost_net_flush(n); mutex_unlock(&n->dev.mutex); return r; } } #ifdef CONFIG_COMPAT static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); } #endif static const struct file_operations vhost_net_fops = { .owner = THIS_MODULE, .release = vhost_net_release, .unlocked_ioctl = vhost_net_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = vhost_net_compat_ioctl, #endif .open = vhost_net_open, .llseek = noop_llseek, }; static struct miscdevice vhost_net_misc = { .minor = VHOST_NET_MINOR, .name = "vhost-net", .fops = &vhost_net_fops, }; static int vhost_net_init(void) { if (experimental_zcopytx) vhost_net_enable_zcopy(VHOST_NET_VQ_TX); return misc_register(&vhost_net_misc); } module_init(vhost_net_init); static void vhost_net_exit(void) { misc_deregister(&vhost_net_misc); } module_exit(vhost_net_exit); MODULE_VERSION("0.0.1"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Michael S. Tsirkin"); MODULE_DESCRIPTION("Host kernel accelerator for virtio net"); MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR); MODULE_ALIAS("devname:vhost-net");
/* dataset markers (not part of either source file; preserved verbatim):
 * ./CrossVul/dataset_final_sorted/CWE-20/c/bad_2024_0
 * crossvul-cpp_data_good_5066_0
 */
/*- * Copyright (c) 2003-2007 Tim Kientzle * Copyright (c) 2010-2012 Michihiro NAKAJIMA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "archive_platform.h" __FBSDID("$FreeBSD: head/lib/libarchive/archive_read_support_format_cpio.c 201163 2009-12-29 05:50:34Z kientzle $"); #ifdef HAVE_ERRNO_H #include <errno.h> #endif /* #include <stdint.h> */ /* See archive_platform.h */ #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #include "archive.h" #include "archive_entry.h" #include "archive_entry_locale.h" #include "archive_private.h" #include "archive_read_private.h" #define bin_magic_offset 0 #define bin_magic_size 2 #define bin_dev_offset 2 #define bin_dev_size 2 #define bin_ino_offset 4 #define bin_ino_size 2 #define bin_mode_offset 6 #define bin_mode_size 2 #define bin_uid_offset 8 #define bin_uid_size 2 #define bin_gid_offset 10 #define bin_gid_size 2 #define bin_nlink_offset 12 #define bin_nlink_size 2 #define bin_rdev_offset 14 #define bin_rdev_size 2 #define bin_mtime_offset 16 #define bin_mtime_size 4 #define bin_namesize_offset 20 #define bin_namesize_size 2 #define bin_filesize_offset 22 #define bin_filesize_size 4 #define bin_header_size 26 #define odc_magic_offset 0 #define odc_magic_size 6 #define odc_dev_offset 6 #define odc_dev_size 6 #define odc_ino_offset 12 #define odc_ino_size 6 #define odc_mode_offset 18 #define odc_mode_size 6 #define odc_uid_offset 24 #define odc_uid_size 6 #define odc_gid_offset 30 #define odc_gid_size 6 #define odc_nlink_offset 36 #define odc_nlink_size 6 #define odc_rdev_offset 42 #define odc_rdev_size 6 #define odc_mtime_offset 48 #define odc_mtime_size 11 #define odc_namesize_offset 59 #define odc_namesize_size 6 #define odc_filesize_offset 65 #define odc_filesize_size 11 #define odc_header_size 76 #define newc_magic_offset 0 #define newc_magic_size 6 #define newc_ino_offset 6 #define newc_ino_size 8 #define newc_mode_offset 14 #define newc_mode_size 8 #define newc_uid_offset 22 #define newc_uid_size 8 #define newc_gid_offset 30 #define newc_gid_size 8 #define newc_nlink_offset 38 #define 
newc_nlink_size 8 #define newc_mtime_offset 46 #define newc_mtime_size 8 #define newc_filesize_offset 54 #define newc_filesize_size 8 #define newc_devmajor_offset 62 #define newc_devmajor_size 8 #define newc_devminor_offset 70 #define newc_devminor_size 8 #define newc_rdevmajor_offset 78 #define newc_rdevmajor_size 8 #define newc_rdevminor_offset 86 #define newc_rdevminor_size 8 #define newc_namesize_offset 94 #define newc_namesize_size 8 #define newc_checksum_offset 102 #define newc_checksum_size 8 #define newc_header_size 110 /* * An afio large ASCII header, which they named itself. * afio utility uses this header, if a file size is larger than 2G bytes * or inode/uid/gid is bigger than 65535(0xFFFF) or mtime is bigger than * 0x7fffffff, which we cannot record to odc header because of its limit. * If not, uses odc header. */ #define afiol_magic_offset 0 #define afiol_magic_size 6 #define afiol_dev_offset 6 #define afiol_dev_size 8 /* hex */ #define afiol_ino_offset 14 #define afiol_ino_size 16 /* hex */ #define afiol_ino_m_offset 30 /* 'm' */ #define afiol_mode_offset 31 #define afiol_mode_size 6 /* oct */ #define afiol_uid_offset 37 #define afiol_uid_size 8 /* hex */ #define afiol_gid_offset 45 #define afiol_gid_size 8 /* hex */ #define afiol_nlink_offset 53 #define afiol_nlink_size 8 /* hex */ #define afiol_rdev_offset 61 #define afiol_rdev_size 8 /* hex */ #define afiol_mtime_offset 69 #define afiol_mtime_size 16 /* hex */ #define afiol_mtime_n_offset 85 /* 'n' */ #define afiol_namesize_offset 86 #define afiol_namesize_size 4 /* hex */ #define afiol_flag_offset 90 #define afiol_flag_size 4 /* hex */ #define afiol_xsize_offset 94 #define afiol_xsize_size 4 /* hex */ #define afiol_xsize_s_offset 98 /* 's' */ #define afiol_filesize_offset 99 #define afiol_filesize_size 16 /* hex */ #define afiol_filesize_c_offset 115 /* ':' */ #define afiol_header_size 116 struct links_entry { struct links_entry *next; struct links_entry *previous; int links; dev_t dev; int64_t 
ino; char *name; }; #define CPIO_MAGIC 0x13141516 struct cpio { int magic; int (*read_header)(struct archive_read *, struct cpio *, struct archive_entry *, size_t *, size_t *); struct links_entry *links_head; int64_t entry_bytes_remaining; int64_t entry_bytes_unconsumed; int64_t entry_offset; int64_t entry_padding; struct archive_string_conv *opt_sconv; struct archive_string_conv *sconv_default; int init_default_conversion; }; static int64_t atol16(const char *, unsigned); static int64_t atol8(const char *, unsigned); static int archive_read_format_cpio_bid(struct archive_read *, int); static int archive_read_format_cpio_options(struct archive_read *, const char *, const char *); static int archive_read_format_cpio_cleanup(struct archive_read *); static int archive_read_format_cpio_read_data(struct archive_read *, const void **, size_t *, int64_t *); static int archive_read_format_cpio_read_header(struct archive_read *, struct archive_entry *); static int archive_read_format_cpio_skip(struct archive_read *); static int64_t be4(const unsigned char *); static int find_odc_header(struct archive_read *); static int find_newc_header(struct archive_read *); static int header_bin_be(struct archive_read *, struct cpio *, struct archive_entry *, size_t *, size_t *); static int header_bin_le(struct archive_read *, struct cpio *, struct archive_entry *, size_t *, size_t *); static int header_newc(struct archive_read *, struct cpio *, struct archive_entry *, size_t *, size_t *); static int header_odc(struct archive_read *, struct cpio *, struct archive_entry *, size_t *, size_t *); static int header_afiol(struct archive_read *, struct cpio *, struct archive_entry *, size_t *, size_t *); static int is_octal(const char *, size_t); static int is_hex(const char *, size_t); static int64_t le4(const unsigned char *); static int record_hardlink(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry); int archive_read_support_format_cpio(struct archive *_a) { struct 
archive_read *a = (struct archive_read *)_a; struct cpio *cpio; int r; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_support_format_cpio"); cpio = (struct cpio *)calloc(1, sizeof(*cpio)); if (cpio == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate cpio data"); return (ARCHIVE_FATAL); } cpio->magic = CPIO_MAGIC; r = __archive_read_register_format(a, cpio, "cpio", archive_read_format_cpio_bid, archive_read_format_cpio_options, archive_read_format_cpio_read_header, archive_read_format_cpio_read_data, archive_read_format_cpio_skip, NULL, archive_read_format_cpio_cleanup, NULL, NULL); if (r != ARCHIVE_OK) free(cpio); return (ARCHIVE_OK); } static int archive_read_format_cpio_bid(struct archive_read *a, int best_bid) { const unsigned char *p; struct cpio *cpio; int bid; (void)best_bid; /* UNUSED */ cpio = (struct cpio *)(a->format->data); if ((p = __archive_read_ahead(a, 6, NULL)) == NULL) return (-1); bid = 0; if (memcmp(p, "070707", 6) == 0) { /* ASCII cpio archive (odc, POSIX.1) */ cpio->read_header = header_odc; bid += 48; /* * XXX TODO: More verification; Could check that only octal * digits appear in appropriate header locations. XXX */ } else if (memcmp(p, "070727", 6) == 0) { /* afio large ASCII cpio archive */ cpio->read_header = header_odc; bid += 48; /* * XXX TODO: More verification; Could check that almost hex * digits appear in appropriate header locations. XXX */ } else if (memcmp(p, "070701", 6) == 0) { /* ASCII cpio archive (SVR4 without CRC) */ cpio->read_header = header_newc; bid += 48; /* * XXX TODO: More verification; Could check that only hex * digits appear in appropriate header locations. XXX */ } else if (memcmp(p, "070702", 6) == 0) { /* ASCII cpio archive (SVR4 with CRC) */ /* XXX TODO: Flag that we should check the CRC. XXX */ cpio->read_header = header_newc; bid += 48; /* * XXX TODO: More verification; Could check that only hex * digits appear in appropriate header locations. 
XXX */ } else if (p[0] * 256 + p[1] == 070707) { /* big-endian binary cpio archives */ cpio->read_header = header_bin_be; bid += 16; /* Is more verification possible here? */ } else if (p[0] + p[1] * 256 == 070707) { /* little-endian binary cpio archives */ cpio->read_header = header_bin_le; bid += 16; /* Is more verification possible here? */ } else return (ARCHIVE_WARN); return (bid); } static int archive_read_format_cpio_options(struct archive_read *a, const char *key, const char *val) { struct cpio *cpio; int ret = ARCHIVE_FAILED; cpio = (struct cpio *)(a->format->data); if (strcmp(key, "compat-2x") == 0) { /* Handle filnames as libarchive 2.x */ cpio->init_default_conversion = (val != NULL)?1:0; return (ARCHIVE_OK); } else if (strcmp(key, "hdrcharset") == 0) { if (val == NULL || val[0] == 0) archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "cpio: hdrcharset option needs a character-set name"); else { cpio->opt_sconv = archive_string_conversion_from_charset( &a->archive, val, 0); if (cpio->opt_sconv != NULL) ret = ARCHIVE_OK; else ret = ARCHIVE_FATAL; } return (ret); } /* Note: The "warn" return is just to inform the options * supervisor that we didn't handle it. It will generate * a suitable error if no one used this option. */ return (ARCHIVE_WARN); } static int archive_read_format_cpio_read_header(struct archive_read *a, struct archive_entry *entry) { struct cpio *cpio; const void *h; struct archive_string_conv *sconv; size_t namelength; size_t name_pad; int r; cpio = (struct cpio *)(a->format->data); sconv = cpio->opt_sconv; if (sconv == NULL) { if (!cpio->init_default_conversion) { cpio->sconv_default = archive_string_default_conversion_for_read( &(a->archive)); cpio->init_default_conversion = 1; } sconv = cpio->sconv_default; } r = (cpio->read_header(a, cpio, entry, &namelength, &name_pad)); if (r < ARCHIVE_WARN) return (r); /* Read name from buffer. 
*/ h = __archive_read_ahead(a, namelength + name_pad, NULL); if (h == NULL) return (ARCHIVE_FATAL); if (archive_entry_copy_pathname_l(entry, (const char *)h, namelength, sconv) != 0) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "Can't allocate memory for Pathname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname can't be converted from %s to current locale.", archive_string_conversion_charset_name(sconv)); r = ARCHIVE_WARN; } cpio->entry_offset = 0; __archive_read_consume(a, namelength + name_pad); /* If this is a symlink, read the link contents. */ if (archive_entry_filetype(entry) == AE_IFLNK) { if (cpio->entry_bytes_remaining > 1024 * 1024) { archive_set_error(&a->archive, ENOMEM, "Rejecting malformed cpio archive: symlink contents exceed 1 megabyte"); return (ARCHIVE_FATAL); } h = __archive_read_ahead(a, (size_t)cpio->entry_bytes_remaining, NULL); if (h == NULL) return (ARCHIVE_FATAL); if (archive_entry_copy_symlink_l(entry, (const char *)h, (size_t)cpio->entry_bytes_remaining, sconv) != 0) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "Can't allocate memory for Linkname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Linkname can't be converted from %s to " "current locale.", archive_string_conversion_charset_name(sconv)); r = ARCHIVE_WARN; } __archive_read_consume(a, cpio->entry_bytes_remaining); cpio->entry_bytes_remaining = 0; } /* XXX TODO: If the full mode is 0160200, then this is a Solaris * ACL description for the following entry. Read this body * and parse it as a Solaris-style ACL, then read the next * header. XXX */ /* Compare name to "TRAILER!!!" to test for end-of-archive. */ if (namelength == 11 && strcmp((const char *)h, "TRAILER!!!") == 0) { /* TODO: Store file location of start of block. */ archive_clear_error(&a->archive); return (ARCHIVE_EOF); } /* Detect and record hardlinks to previously-extracted entries. 
*/ if (record_hardlink(a, cpio, entry) != ARCHIVE_OK) { return (ARCHIVE_FATAL); } return (r); } static int archive_read_format_cpio_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { ssize_t bytes_read; struct cpio *cpio; cpio = (struct cpio *)(a->format->data); if (cpio->entry_bytes_unconsumed) { __archive_read_consume(a, cpio->entry_bytes_unconsumed); cpio->entry_bytes_unconsumed = 0; } if (cpio->entry_bytes_remaining > 0) { *buff = __archive_read_ahead(a, 1, &bytes_read); if (bytes_read <= 0) return (ARCHIVE_FATAL); if (bytes_read > cpio->entry_bytes_remaining) bytes_read = (ssize_t)cpio->entry_bytes_remaining; *size = bytes_read; cpio->entry_bytes_unconsumed = bytes_read; *offset = cpio->entry_offset; cpio->entry_offset += bytes_read; cpio->entry_bytes_remaining -= bytes_read; return (ARCHIVE_OK); } else { if (cpio->entry_padding != __archive_read_consume(a, cpio->entry_padding)) { return (ARCHIVE_FATAL); } cpio->entry_padding = 0; *buff = NULL; *size = 0; *offset = cpio->entry_offset; return (ARCHIVE_EOF); } } static int archive_read_format_cpio_skip(struct archive_read *a) { struct cpio *cpio = (struct cpio *)(a->format->data); int64_t to_skip = cpio->entry_bytes_remaining + cpio->entry_padding + cpio->entry_bytes_unconsumed; if (to_skip != __archive_read_consume(a, to_skip)) { return (ARCHIVE_FATAL); } cpio->entry_bytes_remaining = 0; cpio->entry_padding = 0; cpio->entry_bytes_unconsumed = 0; return (ARCHIVE_OK); } /* * Skip forward to the next cpio newc header by searching for the * 07070[12] string. This should be generalized and merged with * find_odc_header below. 
*/ static int is_hex(const char *p, size_t len) { while (len-- > 0) { if ((*p >= '0' && *p <= '9') || (*p >= 'a' && *p <= 'f') || (*p >= 'A' && *p <= 'F')) ++p; else return (0); } return (1); } static int find_newc_header(struct archive_read *a) { const void *h; const char *p, *q; size_t skip, skipped = 0; ssize_t bytes; for (;;) { h = __archive_read_ahead(a, newc_header_size, &bytes); if (h == NULL) return (ARCHIVE_FATAL); p = h; q = p + bytes; /* Try the typical case first, then go into the slow search.*/ if (memcmp("07070", p, 5) == 0 && (p[5] == '1' || p[5] == '2') && is_hex(p, newc_header_size)) return (ARCHIVE_OK); /* * Scan ahead until we find something that looks * like a newc header. */ while (p + newc_header_size <= q) { switch (p[5]) { case '1': case '2': if (memcmp("07070", p, 5) == 0 && is_hex(p, newc_header_size)) { skip = p - (const char *)h; __archive_read_consume(a, skip); skipped += skip; if (skipped > 0) { archive_set_error(&a->archive, 0, "Skipped %d bytes before " "finding valid header", (int)skipped); return (ARCHIVE_WARN); } return (ARCHIVE_OK); } p += 2; break; case '0': p++; break; default: p += 6; break; } } skip = p - (const char *)h; __archive_read_consume(a, skip); skipped += skip; } } static int header_newc(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry, size_t *namelength, size_t *name_pad) { const void *h; const char *header; int r; r = find_newc_header(a); if (r < ARCHIVE_WARN) return (r); /* Read fixed-size portion of header. */ h = __archive_read_ahead(a, newc_header_size, NULL); if (h == NULL) return (ARCHIVE_FATAL); /* Parse out hex fields. 
*/ header = (const char *)h; if (memcmp(header + newc_magic_offset, "070701", 6) == 0) { a->archive.archive_format = ARCHIVE_FORMAT_CPIO_SVR4_NOCRC; a->archive.archive_format_name = "ASCII cpio (SVR4 with no CRC)"; } else if (memcmp(header + newc_magic_offset, "070702", 6) == 0) { a->archive.archive_format = ARCHIVE_FORMAT_CPIO_SVR4_CRC; a->archive.archive_format_name = "ASCII cpio (SVR4 with CRC)"; } else { /* TODO: Abort here? */ } archive_entry_set_devmajor(entry, (dev_t)atol16(header + newc_devmajor_offset, newc_devmajor_size)); archive_entry_set_devminor(entry, (dev_t)atol16(header + newc_devminor_offset, newc_devminor_size)); archive_entry_set_ino(entry, atol16(header + newc_ino_offset, newc_ino_size)); archive_entry_set_mode(entry, (mode_t)atol16(header + newc_mode_offset, newc_mode_size)); archive_entry_set_uid(entry, atol16(header + newc_uid_offset, newc_uid_size)); archive_entry_set_gid(entry, atol16(header + newc_gid_offset, newc_gid_size)); archive_entry_set_nlink(entry, (unsigned int)atol16(header + newc_nlink_offset, newc_nlink_size)); archive_entry_set_rdevmajor(entry, (dev_t)atol16(header + newc_rdevmajor_offset, newc_rdevmajor_size)); archive_entry_set_rdevminor(entry, (dev_t)atol16(header + newc_rdevminor_offset, newc_rdevminor_size)); archive_entry_set_mtime(entry, atol16(header + newc_mtime_offset, newc_mtime_size), 0); *namelength = (size_t)atol16(header + newc_namesize_offset, newc_namesize_size); /* Pad name to 2 more than a multiple of 4. */ *name_pad = (2 - *namelength) & 3; /* * Note: entry_bytes_remaining is at least 64 bits and * therefore guaranteed to be big enough for a 33-bit file * size. */ cpio->entry_bytes_remaining = atol16(header + newc_filesize_offset, newc_filesize_size); archive_entry_set_size(entry, cpio->entry_bytes_remaining); /* Pad file contents to a multiple of 4. 
*/ cpio->entry_padding = 3 & -cpio->entry_bytes_remaining; __archive_read_consume(a, newc_header_size); return (r); } /* * Skip forward to the next cpio odc header by searching for the * 070707 string. This is a hand-optimized search that could * probably be easily generalized to handle all character-based * cpio variants. */ static int is_octal(const char *p, size_t len) { while (len-- > 0) { if (*p < '0' || *p > '7') return (0); ++p; } return (1); } static int is_afio_large(const char *h, size_t len) { if (len < afiol_header_size) return (0); if (h[afiol_ino_m_offset] != 'm' || h[afiol_mtime_n_offset] != 'n' || h[afiol_xsize_s_offset] != 's' || h[afiol_filesize_c_offset] != ':') return (0); if (!is_hex(h + afiol_dev_offset, afiol_ino_m_offset - afiol_dev_offset)) return (0); if (!is_hex(h + afiol_mode_offset, afiol_mtime_n_offset - afiol_mode_offset)) return (0); if (!is_hex(h + afiol_namesize_offset, afiol_xsize_s_offset - afiol_namesize_offset)) return (0); if (!is_hex(h + afiol_filesize_offset, afiol_filesize_size)) return (0); return (1); } static int find_odc_header(struct archive_read *a) { const void *h; const char *p, *q; size_t skip, skipped = 0; ssize_t bytes; for (;;) { h = __archive_read_ahead(a, odc_header_size, &bytes); if (h == NULL) return (ARCHIVE_FATAL); p = h; q = p + bytes; /* Try the typical case first, then go into the slow search.*/ if (memcmp("070707", p, 6) == 0 && is_octal(p, odc_header_size)) return (ARCHIVE_OK); if (memcmp("070727", p, 6) == 0 && is_afio_large(p, bytes)) { a->archive.archive_format = ARCHIVE_FORMAT_CPIO_AFIO_LARGE; return (ARCHIVE_OK); } /* * Scan ahead until we find something that looks * like an odc header. 
*/ while (p + odc_header_size <= q) { switch (p[5]) { case '7': if ((memcmp("070707", p, 6) == 0 && is_octal(p, odc_header_size)) || (memcmp("070727", p, 6) == 0 && is_afio_large(p, q - p))) { skip = p - (const char *)h; __archive_read_consume(a, skip); skipped += skip; if (p[4] == '2') a->archive.archive_format = ARCHIVE_FORMAT_CPIO_AFIO_LARGE; if (skipped > 0) { archive_set_error(&a->archive, 0, "Skipped %d bytes before " "finding valid header", (int)skipped); return (ARCHIVE_WARN); } return (ARCHIVE_OK); } p += 2; break; case '0': p++; break; default: p += 6; break; } } skip = p - (const char *)h; __archive_read_consume(a, skip); skipped += skip; } } static int header_odc(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry, size_t *namelength, size_t *name_pad) { const void *h; int r; const char *header; a->archive.archive_format = ARCHIVE_FORMAT_CPIO_POSIX; a->archive.archive_format_name = "POSIX octet-oriented cpio"; /* Find the start of the next header. */ r = find_odc_header(a); if (r < ARCHIVE_WARN) return (r); if (a->archive.archive_format == ARCHIVE_FORMAT_CPIO_AFIO_LARGE) { int r2 = (header_afiol(a, cpio, entry, namelength, name_pad)); if (r2 == ARCHIVE_OK) return (r); else return (r2); } /* Read fixed-size portion of header. */ h = __archive_read_ahead(a, odc_header_size, NULL); if (h == NULL) return (ARCHIVE_FATAL); /* Parse out octal fields. 
*/ header = (const char *)h; archive_entry_set_dev(entry, (dev_t)atol8(header + odc_dev_offset, odc_dev_size)); archive_entry_set_ino(entry, atol8(header + odc_ino_offset, odc_ino_size)); archive_entry_set_mode(entry, (mode_t)atol8(header + odc_mode_offset, odc_mode_size)); archive_entry_set_uid(entry, atol8(header + odc_uid_offset, odc_uid_size)); archive_entry_set_gid(entry, atol8(header + odc_gid_offset, odc_gid_size)); archive_entry_set_nlink(entry, (unsigned int)atol8(header + odc_nlink_offset, odc_nlink_size)); archive_entry_set_rdev(entry, (dev_t)atol8(header + odc_rdev_offset, odc_rdev_size)); archive_entry_set_mtime(entry, atol8(header + odc_mtime_offset, odc_mtime_size), 0); *namelength = (size_t)atol8(header + odc_namesize_offset, odc_namesize_size); *name_pad = 0; /* No padding of filename. */ /* * Note: entry_bytes_remaining is at least 64 bits and * therefore guaranteed to be big enough for a 33-bit file * size. */ cpio->entry_bytes_remaining = atol8(header + odc_filesize_offset, odc_filesize_size); archive_entry_set_size(entry, cpio->entry_bytes_remaining); cpio->entry_padding = 0; __archive_read_consume(a, odc_header_size); return (r); } /* * NOTE: if a filename suffix is ".z", it is the file gziped by afio. * it would be nice that we can show uncompressed file size and we can * uncompressed file contents automatically, unfortunately we have nothing * to get a uncompressed file size while reading each header. it means * we also cannot uncompressed file contens under the our framework. */ static int header_afiol(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry, size_t *namelength, size_t *name_pad) { const void *h; const char *header; a->archive.archive_format = ARCHIVE_FORMAT_CPIO_AFIO_LARGE; a->archive.archive_format_name = "afio large ASCII"; /* Read fixed-size portion of header. */ h = __archive_read_ahead(a, afiol_header_size, NULL); if (h == NULL) return (ARCHIVE_FATAL); /* Parse out octal fields. 
*/ header = (const char *)h; archive_entry_set_dev(entry, (dev_t)atol16(header + afiol_dev_offset, afiol_dev_size)); archive_entry_set_ino(entry, atol16(header + afiol_ino_offset, afiol_ino_size)); archive_entry_set_mode(entry, (mode_t)atol8(header + afiol_mode_offset, afiol_mode_size)); archive_entry_set_uid(entry, atol16(header + afiol_uid_offset, afiol_uid_size)); archive_entry_set_gid(entry, atol16(header + afiol_gid_offset, afiol_gid_size)); archive_entry_set_nlink(entry, (unsigned int)atol16(header + afiol_nlink_offset, afiol_nlink_size)); archive_entry_set_rdev(entry, (dev_t)atol16(header + afiol_rdev_offset, afiol_rdev_size)); archive_entry_set_mtime(entry, atol16(header + afiol_mtime_offset, afiol_mtime_size), 0); *namelength = (size_t)atol16(header + afiol_namesize_offset, afiol_namesize_size); *name_pad = 0; /* No padding of filename. */ cpio->entry_bytes_remaining = atol16(header + afiol_filesize_offset, afiol_filesize_size); archive_entry_set_size(entry, cpio->entry_bytes_remaining); cpio->entry_padding = 0; __archive_read_consume(a, afiol_header_size); return (ARCHIVE_OK); } static int header_bin_le(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry, size_t *namelength, size_t *name_pad) { const void *h; const unsigned char *header; a->archive.archive_format = ARCHIVE_FORMAT_CPIO_BIN_LE; a->archive.archive_format_name = "cpio (little-endian binary)"; /* Read fixed-size portion of header. */ h = __archive_read_ahead(a, bin_header_size, NULL); if (h == NULL) { archive_set_error(&a->archive, 0, "End of file trying to read next cpio header"); return (ARCHIVE_FATAL); } /* Parse out binary fields. 
*/ header = (const unsigned char *)h; archive_entry_set_dev(entry, header[bin_dev_offset] + header[bin_dev_offset + 1] * 256); archive_entry_set_ino(entry, header[bin_ino_offset] + header[bin_ino_offset + 1] * 256); archive_entry_set_mode(entry, header[bin_mode_offset] + header[bin_mode_offset + 1] * 256); archive_entry_set_uid(entry, header[bin_uid_offset] + header[bin_uid_offset + 1] * 256); archive_entry_set_gid(entry, header[bin_gid_offset] + header[bin_gid_offset + 1] * 256); archive_entry_set_nlink(entry, header[bin_nlink_offset] + header[bin_nlink_offset + 1] * 256); archive_entry_set_rdev(entry, header[bin_rdev_offset] + header[bin_rdev_offset + 1] * 256); archive_entry_set_mtime(entry, le4(header + bin_mtime_offset), 0); *namelength = header[bin_namesize_offset] + header[bin_namesize_offset + 1] * 256; *name_pad = *namelength & 1; /* Pad to even. */ cpio->entry_bytes_remaining = le4(header + bin_filesize_offset); archive_entry_set_size(entry, cpio->entry_bytes_remaining); cpio->entry_padding = cpio->entry_bytes_remaining & 1; /* Pad to even. */ __archive_read_consume(a, bin_header_size); return (ARCHIVE_OK); } static int header_bin_be(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry, size_t *namelength, size_t *name_pad) { const void *h; const unsigned char *header; a->archive.archive_format = ARCHIVE_FORMAT_CPIO_BIN_BE; a->archive.archive_format_name = "cpio (big-endian binary)"; /* Read fixed-size portion of header. */ h = __archive_read_ahead(a, bin_header_size, NULL); if (h == NULL) { archive_set_error(&a->archive, 0, "End of file trying to read next cpio header"); return (ARCHIVE_FATAL); } /* Parse out binary fields. 
*/
	header = (const unsigned char *)h;

	/* Big-endian binary cpio: every numeric field is a 16-bit
	 * integer stored high byte first. */
	archive_entry_set_dev(entry,
	    header[bin_dev_offset] * 256 + header[bin_dev_offset + 1]);
	archive_entry_set_ino(entry,
	    header[bin_ino_offset] * 256 + header[bin_ino_offset + 1]);
	archive_entry_set_mode(entry,
	    header[bin_mode_offset] * 256 + header[bin_mode_offset + 1]);
	archive_entry_set_uid(entry,
	    header[bin_uid_offset] * 256 + header[bin_uid_offset + 1]);
	archive_entry_set_gid(entry,
	    header[bin_gid_offset] * 256 + header[bin_gid_offset + 1]);
	archive_entry_set_nlink(entry,
	    header[bin_nlink_offset] * 256 + header[bin_nlink_offset + 1]);
	archive_entry_set_rdev(entry,
	    header[bin_rdev_offset] * 256 + header[bin_rdev_offset + 1]);
	/* mtime and filesize are 32-bit values; be4() decodes them. */
	archive_entry_set_mtime(entry, be4(header + bin_mtime_offset), 0);
	*namelength = header[bin_namesize_offset] * 256
	    + header[bin_namesize_offset + 1];
	*name_pad = *namelength & 1; /* Pad to even. */

	cpio->entry_bytes_remaining = be4(header + bin_filesize_offset);
	archive_entry_set_size(entry, cpio->entry_bytes_remaining);
	cpio->entry_padding = cpio->entry_bytes_remaining & 1; /* Pad to even. */
	__archive_read_consume(a, bin_header_size);
	return (ARCHIVE_OK);
}

/*
 * Release all per-format state: drain the hardlink bookkeeping list
 * (freeing each saved pathname), then the cpio struct itself.
 * Always succeeds.
 */
static int
archive_read_format_cpio_cleanup(struct archive_read *a)
{
	struct cpio *cpio;

	cpio = (struct cpio *)(a->format->data);
	/* Free inode->name map */
	while (cpio->links_head != NULL) {
		struct links_entry *lp = cpio->links_head->next;

		/* name may be NULL if a strdup failed mid-record. */
		if (cpio->links_head->name)
			free(cpio->links_head->name);
		free(cpio->links_head);
		cpio->links_head = lp;
	}
	free(cpio);
	(a->format->data) = NULL;
	return (ARCHIVE_OK);
}

/*
 * Decode the PDP-11-style 32-bit layout used by little-endian binary
 * cpio: two little-endian 16-bit words, most-significant word first
 * (value = (p[0] + p[1]*256) * 65536 + (p[2] + p[3]*256)).
 * Returned as int64_t so a full 32-bit value never goes negative.
 */
static int64_t
le4(const unsigned char *p)
{
	return ((p[0] << 16) + (((int64_t)p[1]) << 24) + (p[2] << 0) + (p[3] << 8));
}

/* Decode a plain big-endian 32-bit integer into a non-negative int64_t. */
static int64_t
be4(const unsigned char *p)
{
	return ((((int64_t)p[0]) << 24) + (p[1] << 16) + (p[2] << 8) + (p[3]));
}

/*
 * Note that this implementation does not (and should not!) obey
 * locale settings; you cannot simply substitute strtol here, since
 * it does obey locale.
*/ static int64_t atol8(const char *p, unsigned char_cnt) { int64_t l; int digit; l = 0; while (char_cnt-- > 0) { if (*p >= '0' && *p <= '7') digit = *p - '0'; else return (l); p++; l <<= 3; l |= digit; } return (l); } static int64_t atol16(const char *p, unsigned char_cnt) { int64_t l; int digit; l = 0; while (char_cnt-- > 0) { if (*p >= 'a' && *p <= 'f') digit = *p - 'a' + 10; else if (*p >= 'A' && *p <= 'F') digit = *p - 'A' + 10; else if (*p >= '0' && *p <= '9') digit = *p - '0'; else return (l); p++; l <<= 4; l |= digit; } return (l); } static int record_hardlink(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry) { struct links_entry *le; dev_t dev; int64_t ino; if (archive_entry_nlink(entry) <= 1) return (ARCHIVE_OK); dev = archive_entry_dev(entry); ino = archive_entry_ino64(entry); /* * First look in the list of multiply-linked files. If we've * already dumped it, convert this entry to a hard link entry. */ for (le = cpio->links_head; le; le = le->next) { if (le->dev == dev && le->ino == ino) { archive_entry_copy_hardlink(entry, le->name); if (--le->links <= 0) { if (le->previous != NULL) le->previous->next = le->next; if (le->next != NULL) le->next->previous = le->previous; if (cpio->links_head == le) cpio->links_head = le->next; free(le->name); free(le); } return (ARCHIVE_OK); } } le = (struct links_entry *)malloc(sizeof(struct links_entry)); if (le == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory adding file to list"); return (ARCHIVE_FATAL); } if (cpio->links_head != NULL) cpio->links_head->previous = le; le->next = cpio->links_head; le->previous = NULL; cpio->links_head = le; le->dev = dev; le->ino = ino; le->links = archive_entry_nlink(entry) - 1; le->name = strdup(archive_entry_pathname(entry)); if (le->name == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory adding file to list"); return (ARCHIVE_FATAL); } return (ARCHIVE_OK); }
./CrossVul/dataset_final_sorted/CWE-20/c/good_5066_0
crossvul-cpp_data_good_5711_2
/* Generated by re2c 0.13.5 on Mon May 20 00:45:38 2013 */ #line 1 "Zend/zend_language_scanner.l" /* +----------------------------------------------------------------------+ | Zend Engine | +----------------------------------------------------------------------+ | Copyright (c) 1998-2013 Zend Technologies Ltd. (http://www.zend.com) | +----------------------------------------------------------------------+ | This source file is subject to version 2.00 of the Zend license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.zend.com/license/2_00.txt. | | If you did not receive a copy of the Zend license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@zend.com so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Marcus Boerger <helly@php.net> | | Nuno Lopes <nlopess@php.net> | | Scott MacVicar <scottmac@php.net> | | Flex version authors: | | Andi Gutmans <andi@zend.com> | | Zeev Suraski <zeev@zend.com> | +----------------------------------------------------------------------+ */ /* $Id$ */ #if 0 # define YYDEBUG(s, c) printf("state: %d char: %c\n", s, c) #else # define YYDEBUG(s, c) #endif #include "zend_language_scanner_defs.h" #include <errno.h> #include "zend.h" #ifdef PHP_WIN32 # include <Winuser.h> #endif #include "zend_alloc.h" #include <zend_language_parser.h> #include "zend_compile.h" #include "zend_language_scanner.h" #include "zend_highlight.h" #include "zend_constants.h" #include "zend_variables.h" #include "zend_operators.h" #include "zend_API.h" #include "zend_strtod.h" #include "zend_exceptions.h" #include "tsrm_virtual_cwd.h" #include "tsrm_config_common.h" #define YYCTYPE unsigned char #define YYFILL(n) { if ((YYCURSOR + n) >= (YYLIMIT + ZEND_MMAP_AHEAD)) { return 0; } } #define YYCURSOR SCNG(yy_cursor) #define YYLIMIT SCNG(yy_limit) #define 
YYMARKER SCNG(yy_marker) #define YYGETCONDITION() SCNG(yy_state) #define YYSETCONDITION(s) SCNG(yy_state) = s #define STATE(name) yyc##name /* emulate flex constructs */ #define BEGIN(state) YYSETCONDITION(STATE(state)) #define YYSTATE YYGETCONDITION() #define yytext ((char*)SCNG(yy_text)) #define yyleng SCNG(yy_leng) #define yyless(x) do { YYCURSOR = (unsigned char*)yytext + x; \ yyleng = (unsigned int)x; } while(0) #define yymore() goto yymore_restart /* perform sanity check. If this message is triggered you should increase the ZEND_MMAP_AHEAD value in the zend_streams.h file */ #define YYMAXFILL 16 #if ZEND_MMAP_AHEAD < YYMAXFILL # error ZEND_MMAP_AHEAD should be greater than or equal to YYMAXFILL #endif #ifdef HAVE_STDARG_H # include <stdarg.h> #endif #ifdef HAVE_UNISTD_H # include <unistd.h> #endif /* Globals Macros */ #define SCNG LANG_SCNG #ifdef ZTS ZEND_API ts_rsrc_id language_scanner_globals_id; #else ZEND_API zend_php_scanner_globals language_scanner_globals; #endif #define HANDLE_NEWLINES(s, l) \ do { \ char *p = (s), *boundary = p+(l); \ \ while (p<boundary) { \ if (*p == '\n' || (*p == '\r' && (*(p+1) != '\n'))) { \ CG(zend_lineno)++; \ } \ p++; \ } \ } while (0) #define HANDLE_NEWLINE(c) \ { \ if (c == '\n' || c == '\r') { \ CG(zend_lineno)++; \ } \ } /* To save initial string length after scanning to first variable, CG(doc_comment_len) can be reused */ #define SET_DOUBLE_QUOTES_SCANNED_LENGTH(len) CG(doc_comment_len) = (len) #define GET_DOUBLE_QUOTES_SCANNED_LENGTH() CG(doc_comment_len) #define IS_LABEL_START(c) (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z') || (c) == '_' || (c) >= 0x7F) #define ZEND_IS_OCT(c) ((c)>='0' && (c)<='7') #define ZEND_IS_HEX(c) (((c)>='0' && (c)<='9') || ((c)>='a' && (c)<='f') || ((c)>='A' && (c)<='F')) BEGIN_EXTERN_C() static size_t encoding_filter_script_to_internal(unsigned char **to, size_t *to_length, const unsigned char *from, size_t from_length TSRMLS_DC) { const zend_encoding *internal_encoding = 
zend_multibyte_get_internal_encoding(TSRMLS_C);

	assert(internal_encoding && zend_multibyte_check_lexer_compatibility(internal_encoding));
	return zend_multibyte_encoding_converter(to, to_length, from, from_length, internal_encoding, LANG_SCNG(script_encoding) TSRMLS_CC);
}

/* Convert script text from the detected script encoding into UTF-8,
 * the lexer-safe intermediate representation. */
static size_t encoding_filter_script_to_intermediate(unsigned char **to, size_t *to_length, const unsigned char *from, size_t from_length TSRMLS_DC)
{
	return zend_multibyte_encoding_converter(to, to_length, from, from_length, zend_multibyte_encoding_utf8, LANG_SCNG(script_encoding) TSRMLS_CC);
}

/* Convert UTF-8 intermediate text back to the original script encoding. */
static size_t encoding_filter_intermediate_to_script(unsigned char **to, size_t *to_length, const unsigned char *from, size_t from_length TSRMLS_DC)
{
	return zend_multibyte_encoding_converter(to, to_length, from, from_length, LANG_SCNG(script_encoding), zend_multibyte_encoding_utf8 TSRMLS_CC);
}

/* Convert UTF-8 intermediate text to the configured internal encoding;
 * asserts the internal encoding exists and is lexer-compatible. */
static size_t encoding_filter_intermediate_to_internal(unsigned char **to, size_t *to_length, const unsigned char *from, size_t from_length TSRMLS_DC)
{
	const zend_encoding *internal_encoding = zend_multibyte_get_internal_encoding(TSRMLS_C);
	assert(internal_encoding && zend_multibyte_check_lexer_compatibility(internal_encoding));
	return zend_multibyte_encoding_converter(to, to_length, from, from_length, internal_encoding, zend_multibyte_encoding_utf8 TSRMLS_CC);
}

/* Save the current lexer condition on the state stack and switch to
 * new_state.  Callers use the yy_push_state() macro below, which maps
 * a bare state name to its yyc-prefixed constant. */
static void _yy_push_state(int new_state TSRMLS_DC)
{
	zend_stack_push(&SCNG(state_stack), (void *) &YYGETCONDITION(), sizeof(int));
	YYSETCONDITION(new_state);
}

#define yy_push_state(state_and_tsrm) _yy_push_state(yyc##state_and_tsrm)

/* Restore the most recently pushed lexer condition and pop it off the
 * state stack. */
static void yy_pop_state(TSRMLS_D)
{
	int *stack_state;
	zend_stack_top(&SCNG(state_stack), (void **) &stack_state);
	YYSETCONDITION(*stack_state);
	zend_stack_del_top(&SCNG(state_stack));
}

/* Point the scanner's cursor/limit at the given buffer of len bytes.
 * yy_start is only initialized on the first call so offsets computed
 * relative to it stay valid across re-scans. */
static void yy_scan_buffer(char *str, unsigned int len TSRMLS_DC)
{
	YYCURSOR = (YYCTYPE*)str;
	YYLIMIT  = YYCURSOR + len;
	if (!SCNG(yy_start)) {
		SCNG(yy_start) = YYCURSOR;
	}
}

/* Reset per-request scanner state: clear parse-error/heredoc/doc-comment
 * bookkeeping and initialize the lexer-condition stack. */
void startup_scanner(TSRMLS_D)
{
	CG(parse_error) = 0;
	CG(heredoc) =
NULL; CG(heredoc_len) = 0; CG(doc_comment) = NULL; CG(doc_comment_len) = 0; zend_stack_init(&SCNG(state_stack)); } void shutdown_scanner(TSRMLS_D) { if (CG(heredoc)) { efree(CG(heredoc)); CG(heredoc_len)=0; } CG(parse_error) = 0; zend_stack_destroy(&SCNG(state_stack)); RESET_DOC_COMMENT(); } ZEND_API void zend_save_lexical_state(zend_lex_state *lex_state TSRMLS_DC) { lex_state->yy_leng = SCNG(yy_leng); lex_state->yy_start = SCNG(yy_start); lex_state->yy_text = SCNG(yy_text); lex_state->yy_cursor = SCNG(yy_cursor); lex_state->yy_marker = SCNG(yy_marker); lex_state->yy_limit = SCNG(yy_limit); lex_state->state_stack = SCNG(state_stack); zend_stack_init(&SCNG(state_stack)); lex_state->in = SCNG(yy_in); lex_state->yy_state = YYSTATE; lex_state->filename = zend_get_compiled_filename(TSRMLS_C); lex_state->lineno = CG(zend_lineno); lex_state->script_org = SCNG(script_org); lex_state->script_org_size = SCNG(script_org_size); lex_state->script_filtered = SCNG(script_filtered); lex_state->script_filtered_size = SCNG(script_filtered_size); lex_state->input_filter = SCNG(input_filter); lex_state->output_filter = SCNG(output_filter); lex_state->script_encoding = SCNG(script_encoding); } ZEND_API void zend_restore_lexical_state(zend_lex_state *lex_state TSRMLS_DC) { SCNG(yy_leng) = lex_state->yy_leng; SCNG(yy_start) = lex_state->yy_start; SCNG(yy_text) = lex_state->yy_text; SCNG(yy_cursor) = lex_state->yy_cursor; SCNG(yy_marker) = lex_state->yy_marker; SCNG(yy_limit) = lex_state->yy_limit; zend_stack_destroy(&SCNG(state_stack)); SCNG(state_stack) = lex_state->state_stack; SCNG(yy_in) = lex_state->in; YYSETCONDITION(lex_state->yy_state); CG(zend_lineno) = lex_state->lineno; zend_restore_compiled_filename(lex_state->filename TSRMLS_CC); if (SCNG(script_filtered)) { efree(SCNG(script_filtered)); SCNG(script_filtered) = NULL; } SCNG(script_org) = lex_state->script_org; SCNG(script_org_size) = lex_state->script_org_size; SCNG(script_filtered) = lex_state->script_filtered; 
SCNG(script_filtered_size) = lex_state->script_filtered_size; SCNG(input_filter) = lex_state->input_filter; SCNG(output_filter) = lex_state->output_filter; SCNG(script_encoding) = lex_state->script_encoding; if (CG(heredoc)) { efree(CG(heredoc)); CG(heredoc) = NULL; CG(heredoc_len) = 0; } } ZEND_API void zend_destroy_file_handle(zend_file_handle *file_handle TSRMLS_DC) { zend_llist_del_element(&CG(open_files), file_handle, (int (*)(void *, void *)) zend_compare_file_handles); /* zend_file_handle_dtor() operates on the copy, so we have to NULLify the original here */ file_handle->opened_path = NULL; if (file_handle->free_filename) { file_handle->filename = NULL; } } #define BOM_UTF32_BE "\x00\x00\xfe\xff" #define BOM_UTF32_LE "\xff\xfe\x00\x00" #define BOM_UTF16_BE "\xfe\xff" #define BOM_UTF16_LE "\xff\xfe" #define BOM_UTF8 "\xef\xbb\xbf" static const zend_encoding *zend_multibyte_detect_utf_encoding(const unsigned char *script, size_t script_size TSRMLS_DC) { const unsigned char *p; int wchar_size = 2; int le = 0; /* utf-16 or utf-32? */ p = script; while ((p-script) < script_size) { p = memchr(p, 0, script_size-(p-script)-2); if (!p) { break; } if (*(p+1) == '\0' && *(p+2) == '\0') { wchar_size = 4; break; } /* searching for UTF-32 specific byte orders, so this will do */ p += 4; } /* BE or LE? */ p = script; while ((p-script) < script_size) { if (*p == '\0' && *(p+wchar_size-1) != '\0') { /* BE */ le = 0; break; } else if (*p != '\0' && *(p+wchar_size-1) == '\0') { /* LE* */ le = 1; break; } p += wchar_size; } if (wchar_size == 2) { return le ? zend_multibyte_encoding_utf16le : zend_multibyte_encoding_utf16be; } else { return le ? 
zend_multibyte_encoding_utf32le : zend_multibyte_encoding_utf32be; } return NULL; } static const zend_encoding* zend_multibyte_detect_unicode(TSRMLS_D) { const zend_encoding *script_encoding = NULL; int bom_size; unsigned char *pos1, *pos2; if (LANG_SCNG(script_org_size) < sizeof(BOM_UTF32_LE)-1) { return NULL; } /* check out BOM */ if (!memcmp(LANG_SCNG(script_org), BOM_UTF32_BE, sizeof(BOM_UTF32_BE)-1)) { script_encoding = zend_multibyte_encoding_utf32be; bom_size = sizeof(BOM_UTF32_BE)-1; } else if (!memcmp(LANG_SCNG(script_org), BOM_UTF32_LE, sizeof(BOM_UTF32_LE)-1)) { script_encoding = zend_multibyte_encoding_utf32le; bom_size = sizeof(BOM_UTF32_LE)-1; } else if (!memcmp(LANG_SCNG(script_org), BOM_UTF16_BE, sizeof(BOM_UTF16_BE)-1)) { script_encoding = zend_multibyte_encoding_utf16be; bom_size = sizeof(BOM_UTF16_BE)-1; } else if (!memcmp(LANG_SCNG(script_org), BOM_UTF16_LE, sizeof(BOM_UTF16_LE)-1)) { script_encoding = zend_multibyte_encoding_utf16le; bom_size = sizeof(BOM_UTF16_LE)-1; } else if (!memcmp(LANG_SCNG(script_org), BOM_UTF8, sizeof(BOM_UTF8)-1)) { script_encoding = zend_multibyte_encoding_utf8; bom_size = sizeof(BOM_UTF8)-1; } if (script_encoding) { /* remove BOM */ LANG_SCNG(script_org) += bom_size; LANG_SCNG(script_org_size) -= bom_size; return script_encoding; } /* script contains NULL bytes -> auto-detection */ if ((pos1 = memchr(LANG_SCNG(script_org), 0, LANG_SCNG(script_org_size)))) { /* check if the NULL byte is after the __HALT_COMPILER(); */ pos2 = LANG_SCNG(script_org); while (pos1 - pos2 >= sizeof("__HALT_COMPILER();")-1) { pos2 = memchr(pos2, '_', pos1 - pos2); if (!pos2) break; pos2++; if (strncasecmp((char*)pos2, "_HALT_COMPILER", sizeof("_HALT_COMPILER")-1) == 0) { pos2 += sizeof("_HALT_COMPILER")-1; while (*pos2 == ' ' || *pos2 == '\t' || *pos2 == '\r' || *pos2 == '\n') { pos2++; } if (*pos2 == '(') { pos2++; while (*pos2 == ' ' || *pos2 == '\t' || *pos2 == '\r' || *pos2 == '\n') { pos2++; } if (*pos2 == ')') { pos2++; while (*pos2 == 
' ' || *pos2 == '\t' || *pos2 == '\r' || *pos2 == '\n') { pos2++; } if (*pos2 == ';') { return NULL; } } } } } /* make best effort if BOM is missing */ return zend_multibyte_detect_utf_encoding(LANG_SCNG(script_org), LANG_SCNG(script_org_size) TSRMLS_CC); } return NULL; } static const zend_encoding* zend_multibyte_find_script_encoding(TSRMLS_D) { const zend_encoding *script_encoding; if (CG(detect_unicode)) { /* check out bom(byte order mark) and see if containing wchars */ script_encoding = zend_multibyte_detect_unicode(TSRMLS_C); if (script_encoding != NULL) { /* bom or wchar detection is prior to 'script_encoding' option */ return script_encoding; } } /* if no script_encoding specified, just leave alone */ if (!CG(script_encoding_list) || !CG(script_encoding_list_size)) { return NULL; } /* if multiple encodings specified, detect automagically */ if (CG(script_encoding_list_size) > 1) { return zend_multibyte_encoding_detector(LANG_SCNG(script_org), LANG_SCNG(script_org_size), CG(script_encoding_list), CG(script_encoding_list_size) TSRMLS_CC); } return CG(script_encoding_list)[0]; } ZEND_API int zend_multibyte_set_filter(const zend_encoding *onetime_encoding TSRMLS_DC) { const zend_encoding *internal_encoding = zend_multibyte_get_internal_encoding(TSRMLS_C); const zend_encoding *script_encoding = onetime_encoding ? 
onetime_encoding: zend_multibyte_find_script_encoding(TSRMLS_C); if (!script_encoding) { return FAILURE; } /* judge input/output filter */ LANG_SCNG(script_encoding) = script_encoding; LANG_SCNG(input_filter) = NULL; LANG_SCNG(output_filter) = NULL; if (!internal_encoding || LANG_SCNG(script_encoding) == internal_encoding) { if (!zend_multibyte_check_lexer_compatibility(LANG_SCNG(script_encoding))) { /* and if not, work around w/ script_encoding -> utf-8 -> script_encoding conversion */ LANG_SCNG(input_filter) = encoding_filter_script_to_intermediate; LANG_SCNG(output_filter) = encoding_filter_intermediate_to_script; } else { LANG_SCNG(input_filter) = NULL; LANG_SCNG(output_filter) = NULL; } return SUCCESS; } if (zend_multibyte_check_lexer_compatibility(internal_encoding)) { LANG_SCNG(input_filter) = encoding_filter_script_to_internal; LANG_SCNG(output_filter) = NULL; } else if (zend_multibyte_check_lexer_compatibility(LANG_SCNG(script_encoding))) { LANG_SCNG(input_filter) = NULL; LANG_SCNG(output_filter) = encoding_filter_script_to_internal; } else { /* both script and internal encodings are incompatible w/ flex */ LANG_SCNG(input_filter) = encoding_filter_script_to_intermediate; LANG_SCNG(output_filter) = encoding_filter_intermediate_to_internal; } return 0; } ZEND_API int open_file_for_scanning(zend_file_handle *file_handle TSRMLS_DC) { const char *file_path = NULL; char *buf; size_t size, offset = 0; /* The shebang line was read, get the current position to obtain the buffer start */ if (CG(start_lineno) == 2 && file_handle->type == ZEND_HANDLE_FP && file_handle->handle.fp) { if ((offset = ftell(file_handle->handle.fp)) == -1) { offset = 0; } } if (zend_stream_fixup(file_handle, &buf, &size TSRMLS_CC) == FAILURE) { return FAILURE; } zend_llist_add_element(&CG(open_files), file_handle); if (file_handle->handle.stream.handle >= (void*)file_handle && file_handle->handle.stream.handle <= (void*)(file_handle+1)) { zend_file_handle *fh = 
(zend_file_handle*)zend_llist_get_last(&CG(open_files)); size_t diff = (char*)file_handle->handle.stream.handle - (char*)file_handle; fh->handle.stream.handle = (void*)(((char*)fh) + diff); file_handle->handle.stream.handle = fh->handle.stream.handle; } /* Reset the scanner for scanning the new file */ SCNG(yy_in) = file_handle; SCNG(yy_start) = NULL; if (size != -1) { if (CG(multibyte)) { SCNG(script_org) = (unsigned char*)buf; SCNG(script_org_size) = size; SCNG(script_filtered) = NULL; zend_multibyte_set_filter(NULL TSRMLS_CC); if (SCNG(input_filter)) { if ((size_t)-1 == SCNG(input_filter)(&SCNG(script_filtered), &SCNG(script_filtered_size), SCNG(script_org), SCNG(script_org_size) TSRMLS_CC)) { zend_error_noreturn(E_COMPILE_ERROR, "Could not convert the script from the detected " "encoding \"%s\" to a compatible encoding", zend_multibyte_get_encoding_name(LANG_SCNG(script_encoding))); } buf = (char*)SCNG(script_filtered); size = SCNG(script_filtered_size); } } SCNG(yy_start) = (unsigned char *)buf - offset; yy_scan_buffer(buf, size TSRMLS_CC); } else { zend_error_noreturn(E_COMPILE_ERROR, "zend_stream_mmap() failed"); } BEGIN(INITIAL); if (file_handle->opened_path) { file_path = file_handle->opened_path; } else { file_path = file_handle->filename; } zend_set_compiled_filename(file_path TSRMLS_CC); if (CG(start_lineno)) { CG(zend_lineno) = CG(start_lineno); CG(start_lineno) = 0; } else { CG(zend_lineno) = 1; } CG(increment_lineno) = 0; return SUCCESS; } END_EXTERN_C() ZEND_API zend_op_array *compile_file(zend_file_handle *file_handle, int type TSRMLS_DC) { zend_lex_state original_lex_state; zend_op_array *op_array = (zend_op_array *) emalloc(sizeof(zend_op_array)); zend_op_array *original_active_op_array = CG(active_op_array); zend_op_array *retval=NULL; int compiler_result; zend_bool compilation_successful=0; znode retval_znode; zend_bool original_in_compilation = CG(in_compilation); retval_znode.op_type = IS_CONST; retval_znode.u.constant.type = IS_LONG; 
retval_znode.u.constant.value.lval = 1; Z_UNSET_ISREF(retval_znode.u.constant); Z_SET_REFCOUNT(retval_znode.u.constant, 1); zend_save_lexical_state(&original_lex_state TSRMLS_CC); retval = op_array; /* success oriented */ if (open_file_for_scanning(file_handle TSRMLS_CC)==FAILURE) { if (type==ZEND_REQUIRE) { zend_message_dispatcher(ZMSG_FAILED_REQUIRE_FOPEN, file_handle->filename TSRMLS_CC); zend_bailout(); } else { zend_message_dispatcher(ZMSG_FAILED_INCLUDE_FOPEN, file_handle->filename TSRMLS_CC); } compilation_successful=0; } else { init_op_array(op_array, ZEND_USER_FUNCTION, INITIAL_OP_ARRAY_SIZE TSRMLS_CC); CG(in_compilation) = 1; CG(active_op_array) = op_array; zend_stack_push(&CG(context_stack), (void *) &CG(context), sizeof(CG(context))); zend_init_compiler_context(TSRMLS_C); compiler_result = zendparse(TSRMLS_C); zend_do_return(&retval_znode, 0 TSRMLS_CC); CG(in_compilation) = original_in_compilation; if (compiler_result != 0) { /* parser error */ zend_bailout(); } compilation_successful=1; } if (retval) { CG(active_op_array) = original_active_op_array; if (compilation_successful) { pass_two(op_array TSRMLS_CC); zend_release_labels(0 TSRMLS_CC); } else { efree(op_array); retval = NULL; } } zend_restore_lexical_state(&original_lex_state TSRMLS_CC); return retval; } zend_op_array *compile_filename(int type, zval *filename TSRMLS_DC) { zend_file_handle file_handle; zval tmp; zend_op_array *retval; char *opened_path = NULL; if (filename->type != IS_STRING) { tmp = *filename; zval_copy_ctor(&tmp); convert_to_string(&tmp); filename = &tmp; } file_handle.filename = filename->value.str.val; file_handle.free_filename = 0; file_handle.type = ZEND_HANDLE_FILENAME; file_handle.opened_path = NULL; file_handle.handle.fp = NULL; retval = zend_compile_file(&file_handle, type TSRMLS_CC); if (retval && file_handle.handle.stream.handle) { int dummy = 1; if (!file_handle.opened_path) { file_handle.opened_path = opened_path = estrndup(filename->value.str.val, 
filename->value.str.len); } zend_hash_add(&EG(included_files), file_handle.opened_path, strlen(file_handle.opened_path)+1, (void *)&dummy, sizeof(int), NULL); if (opened_path) { efree(opened_path); } } zend_destroy_file_handle(&file_handle TSRMLS_CC); if (filename==&tmp) { zval_dtor(&tmp); } return retval; } ZEND_API int zend_prepare_string_for_scanning(zval *str, char *filename TSRMLS_DC) { char *buf; size_t size; /* enforce two trailing NULLs for flex... */ if (IS_INTERNED(str->value.str.val)) { char *tmp = safe_emalloc(1, str->value.str.len, ZEND_MMAP_AHEAD); memcpy(tmp, str->value.str.val, str->value.str.len + ZEND_MMAP_AHEAD); str->value.str.val = tmp; } else { str->value.str.val = safe_erealloc(str->value.str.val, 1, str->value.str.len, ZEND_MMAP_AHEAD); } memset(str->value.str.val + str->value.str.len, 0, ZEND_MMAP_AHEAD); SCNG(yy_in) = NULL; SCNG(yy_start) = NULL; buf = str->value.str.val; size = str->value.str.len; if (CG(multibyte)) { SCNG(script_org) = (unsigned char*)buf; SCNG(script_org_size) = size; SCNG(script_filtered) = NULL; zend_multibyte_set_filter(zend_multibyte_get_internal_encoding(TSRMLS_C) TSRMLS_CC); if (SCNG(input_filter)) { if ((size_t)-1 == SCNG(input_filter)(&SCNG(script_filtered), &SCNG(script_filtered_size), SCNG(script_org), SCNG(script_org_size) TSRMLS_CC)) { zend_error_noreturn(E_COMPILE_ERROR, "Could not convert the script from the detected " "encoding \"%s\" to a compatible encoding", zend_multibyte_get_encoding_name(LANG_SCNG(script_encoding))); } buf = (char*)SCNG(script_filtered); size = SCNG(script_filtered_size); } } yy_scan_buffer(buf, size TSRMLS_CC); zend_set_compiled_filename(filename TSRMLS_CC); CG(zend_lineno) = 1; CG(increment_lineno) = 0; return SUCCESS; } ZEND_API size_t zend_get_scanned_file_offset(TSRMLS_D) { size_t offset = SCNG(yy_cursor) - SCNG(yy_start); if (SCNG(input_filter)) { size_t original_offset = offset, length = 0; do { unsigned char *p = NULL; if ((size_t)-1 == SCNG(input_filter)(&p, &length, 
SCNG(script_org), offset TSRMLS_CC)) { return (size_t)-1; } efree(p); if (length > original_offset) { offset--; } else if (length < original_offset) { offset++; } } while (original_offset != length); } return offset; } zend_op_array *compile_string(zval *source_string, char *filename TSRMLS_DC) { zend_lex_state original_lex_state; zend_op_array *op_array = (zend_op_array *) emalloc(sizeof(zend_op_array)); zend_op_array *original_active_op_array = CG(active_op_array); zend_op_array *retval; zval tmp; int compiler_result; zend_bool original_in_compilation = CG(in_compilation); if (source_string->value.str.len==0) { efree(op_array); return NULL; } CG(in_compilation) = 1; tmp = *source_string; zval_copy_ctor(&tmp); convert_to_string(&tmp); source_string = &tmp; zend_save_lexical_state(&original_lex_state TSRMLS_CC); if (zend_prepare_string_for_scanning(source_string, filename TSRMLS_CC)==FAILURE) { efree(op_array); retval = NULL; } else { zend_bool orig_interactive = CG(interactive); CG(interactive) = 0; init_op_array(op_array, ZEND_EVAL_CODE, INITIAL_OP_ARRAY_SIZE TSRMLS_CC); CG(interactive) = orig_interactive; CG(active_op_array) = op_array; zend_stack_push(&CG(context_stack), (void *) &CG(context), sizeof(CG(context))); zend_init_compiler_context(TSRMLS_C); BEGIN(ST_IN_SCRIPTING); compiler_result = zendparse(TSRMLS_C); if (SCNG(script_filtered)) { efree(SCNG(script_filtered)); SCNG(script_filtered) = NULL; } if (compiler_result != 0) { CG(active_op_array) = original_active_op_array; CG(unclean_shutdown)=1; destroy_op_array(op_array TSRMLS_CC); efree(op_array); retval = NULL; } else { zend_do_return(NULL, 0 TSRMLS_CC); CG(active_op_array) = original_active_op_array; pass_two(op_array TSRMLS_CC); zend_release_labels(0 TSRMLS_CC); retval = op_array; } } zend_restore_lexical_state(&original_lex_state TSRMLS_CC); zval_dtor(&tmp); CG(in_compilation) = original_in_compilation; return retval; } BEGIN_EXTERN_C() int highlight_file(char *filename, zend_syntax_highlighter_ini 
*syntax_highlighter_ini TSRMLS_DC) { zend_lex_state original_lex_state; zend_file_handle file_handle; file_handle.type = ZEND_HANDLE_FILENAME; file_handle.filename = filename; file_handle.free_filename = 0; file_handle.opened_path = NULL; zend_save_lexical_state(&original_lex_state TSRMLS_CC); if (open_file_for_scanning(&file_handle TSRMLS_CC)==FAILURE) { zend_message_dispatcher(ZMSG_FAILED_HIGHLIGHT_FOPEN, filename TSRMLS_CC); zend_restore_lexical_state(&original_lex_state TSRMLS_CC); return FAILURE; } zend_highlight(syntax_highlighter_ini TSRMLS_CC); if (SCNG(script_filtered)) { efree(SCNG(script_filtered)); SCNG(script_filtered) = NULL; } zend_destroy_file_handle(&file_handle TSRMLS_CC); zend_restore_lexical_state(&original_lex_state TSRMLS_CC); return SUCCESS; } int highlight_string(zval *str, zend_syntax_highlighter_ini *syntax_highlighter_ini, char *str_name TSRMLS_DC) { zend_lex_state original_lex_state; zval tmp = *str; str = &tmp; zval_copy_ctor(str); zend_save_lexical_state(&original_lex_state TSRMLS_CC); if (zend_prepare_string_for_scanning(str, str_name TSRMLS_CC)==FAILURE) { zend_restore_lexical_state(&original_lex_state TSRMLS_CC); return FAILURE; } BEGIN(INITIAL); zend_highlight(syntax_highlighter_ini TSRMLS_CC); if (SCNG(script_filtered)) { efree(SCNG(script_filtered)); SCNG(script_filtered) = NULL; } zend_restore_lexical_state(&original_lex_state TSRMLS_CC); zval_dtor(str); return SUCCESS; } ZEND_API void zend_multibyte_yyinput_again(zend_encoding_filter old_input_filter, const zend_encoding *old_encoding TSRMLS_DC) { size_t length; unsigned char *new_yy_start; /* convert and set */ if (!SCNG(input_filter)) { if (SCNG(script_filtered)) { efree(SCNG(script_filtered)); SCNG(script_filtered) = NULL; } SCNG(script_filtered_size) = 0; length = SCNG(script_org_size); new_yy_start = SCNG(script_org); } else { if ((size_t)-1 == SCNG(input_filter)(&new_yy_start, &length, SCNG(script_org), SCNG(script_org_size) TSRMLS_CC)) { 
			/* Conversion failed: abort compilation with a fatal error. */
			zend_error_noreturn(E_COMPILE_ERROR, "Could not convert the script from the detected "
					"encoding \"%s\" to a compatible encoding", zend_multibyte_get_encoding_name(LANG_SCNG(script_encoding)));
		}
		SCNG(script_filtered) = new_yy_start;
		SCNG(script_filtered_size) = length;
	}

	/* Rebase every scanner pointer onto the new buffer, preserving each
	 * pointer's offset relative to the old yy_start. */
	SCNG(yy_cursor) = new_yy_start + (SCNG(yy_cursor) - SCNG(yy_start));
	SCNG(yy_marker) = new_yy_start + (SCNG(yy_marker) - SCNG(yy_start));
	SCNG(yy_text) = new_yy_start + (SCNG(yy_text) - SCNG(yy_start));
	SCNG(yy_limit) = new_yy_start + (SCNG(yy_limit) - SCNG(yy_start));

	SCNG(yy_start) = new_yy_start;
}


/* Copy yytext into zendlval->value.str: through the output filter when one
 * is installed (multibyte conversion, filter allocates), otherwise a plain
 * estrndup copy. */
# define zend_copy_value(zendlval, yytext, yyleng) \
	if (SCNG(output_filter)) { \
		size_t sz = 0; \
		SCNG(output_filter)((unsigned char **)&(zendlval->value.str.val), &sz, (unsigned char *)yytext, (size_t)yyleng TSRMLS_CC); \
		zendlval->value.str.len = sz; \
	} else { \
		zendlval->value.str.val = (char *) estrndup(yytext, yyleng); \
		zendlval->value.str.len = yyleng; \
	}

/* zend_scan_escape_string():
 * Copy str[0..len) into zendlval and decode backslash escape sequences
 * in place (\n \r \t \f \v \e \\ \$ \x.. \X.. octal, plus the enclosing
 * quote_type), shrinking value.str.len as escapes collapse.  quote_type is
 * '"' or '`' (0 for heredoc — only matching quotes are unescaped).
 * Body continues on the next source line. */
static void zend_scan_escape_string(zval *zendlval, char *str, int len, char quote_type TSRMLS_DC)
{
	register char *s, *t;
	char *end;

	ZVAL_STRINGL(zendlval, str, len, 1);

	/* convert escape sequences — s reads, t writes; t never outruns s so the
	 * decode is safe in place */
	s = t = zendlval->value.str.val;
	end = s+zendlval->value.str.len;
	while (s<end) {
		if (*s=='\\') {
			s++;
			if (s >= end) {
				/* lone trailing backslash: keep it literally */
				*t++ = '\\';
				break;
			}
			switch(*s) {
				case 'n':
					*t++ = '\n';
					zendlval->value.str.len--;
					break;
				case 'r':
					*t++ = '\r';
					zendlval->value.str.len--;
					break;
				case 't':
					*t++ = '\t';
					zendlval->value.str.len--;
					break;
				case 'f':
					*t++ = '\f';
					zendlval->value.str.len--;
					break;
				case 'v':
					*t++ = '\v';
					zendlval->value.str.len--;
					break;
				case 'e':
#ifdef PHP_WIN32
					*t++ = VK_ESCAPE;
#else
					*t++ = '\e';
#endif
					zendlval->value.str.len--;
					break;
				case '"':
				case '`':
					if (*s != quote_type) {
						/* escaped quote of the OTHER kind stays escaped */
						*t++ = '\\';
						*t++ = *s;
						break;
					}
					/* deliberate fall through: quote matches the enclosing
					 * quote type, so unescape it like \\ and \$ */
				case '\\':
				case '$':
					*t++ = *s;
					zendlval->value.str.len--;
					break;
				case 'x':
				case 'X':
					if (ZEND_IS_HEX(*(s+1))) {
						char hex_buf[3] = { 0, 0, 0 };

						zendlval->value.str.len--; /* for the 'x' */
						hex_buf[0] = *(++s);
zendlval->value.str.len--; if (ZEND_IS_HEX(*(s+1))) { hex_buf[1] = *(++s); zendlval->value.str.len--; } *t++ = (char) strtol(hex_buf, NULL, 16); } else { *t++ = '\\'; *t++ = *s; } break; default: /* check for an octal */ if (ZEND_IS_OCT(*s)) { char octal_buf[4] = { 0, 0, 0, 0 }; octal_buf[0] = *s; zendlval->value.str.len--; if (ZEND_IS_OCT(*(s+1))) { octal_buf[1] = *(++s); zendlval->value.str.len--; if (ZEND_IS_OCT(*(s+1))) { octal_buf[2] = *(++s); zendlval->value.str.len--; } } *t++ = (char) strtol(octal_buf, NULL, 8); } else { *t++ = '\\'; *t++ = *s; } break; } } else { *t++ = *s; } if (*s == '\n' || (*s == '\r' && (*(s+1) != '\n'))) { CG(zend_lineno)++; } s++; } *t = 0; if (SCNG(output_filter)) { size_t sz = 0; s = zendlval->value.str.val; SCNG(output_filter)((unsigned char **)&(zendlval->value.str.val), &sz, (unsigned char *)s, (size_t)zendlval->value.str.len TSRMLS_CC); zendlval->value.str.len = sz; efree(s); } } int lex_scan(zval *zendlval TSRMLS_DC) { restart: SCNG(yy_text) = YYCURSOR; yymore_restart: #line 1004 "Zend/zend_language_scanner.c" { YYCTYPE yych; unsigned int yyaccept = 0; if (YYGETCONDITION() < 5) { if (YYGETCONDITION() < 2) { if (YYGETCONDITION() < 1) { goto yyc_ST_IN_SCRIPTING; } else { goto yyc_ST_LOOKING_FOR_PROPERTY; } } else { if (YYGETCONDITION() < 3) { goto yyc_ST_BACKQUOTE; } else { if (YYGETCONDITION() < 4) { goto yyc_ST_DOUBLE_QUOTES; } else { goto yyc_ST_HEREDOC; } } } } else { if (YYGETCONDITION() < 7) { if (YYGETCONDITION() < 6) { goto yyc_ST_LOOKING_FOR_VARNAME; } else { goto yyc_ST_VAR_OFFSET; } } else { if (YYGETCONDITION() < 8) { goto yyc_INITIAL; } else { if (YYGETCONDITION() < 9) { goto yyc_ST_END_HEREDOC; } else { goto yyc_ST_NOWDOC; } } } } /* *********************************** */ yyc_INITIAL: { static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; YYDEBUG(0, *YYCURSOR); YYFILL(8); yych = *YYCURSOR; if (yych != '<') goto yy4; YYDEBUG(2, *YYCURSOR); yyaccept = 0; yych = *(YYMARKER = ++YYCURSOR); if (yych <= '?') { if (yych == '%') goto yy7; if (yych >= '?') goto yy5; } else { if (yych <= 'S') { if (yych >= 'S') goto yy9; } else { if (yych == 's') goto yy9; } } yy3: YYDEBUG(3, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1792 "Zend/zend_language_scanner.l" { if (YYCURSOR > YYLIMIT) { return 0; } inline_char_handler: while (1) { YYCTYPE *ptr = memchr(YYCURSOR, '<', YYLIMIT - YYCURSOR); YYCURSOR = ptr ? 
ptr + 1 : YYLIMIT; if (YYCURSOR < YYLIMIT) { switch (*YYCURSOR) { case '?': if (CG(short_tags) || !strncasecmp((char*)YYCURSOR + 1, "php", 3) || (*(YYCURSOR + 1) == '=')) { /* Assume [ \t\n\r] follows "php" */ break; } continue; case '%': if (CG(asp_tags)) { break; } continue; case 's': case 'S': /* Probably NOT an opening PHP <script> tag, so don't end the HTML chunk yet * If it is, the PHP <script> tag rule checks for any HTML scanned before it */ YYCURSOR--; yymore(); default: continue; } YYCURSOR--; } break; } inline_html: yyleng = YYCURSOR - SCNG(yy_text); if (SCNG(output_filter)) { int readsize; size_t sz = 0; readsize = SCNG(output_filter)((unsigned char **)&(zendlval->value.str.val), &sz, (unsigned char *)yytext, (size_t)yyleng TSRMLS_CC); zendlval->value.str.len = sz; if (readsize < yyleng) { yyless(readsize); } } else { zendlval->value.str.val = (char *) estrndup(yytext, yyleng); zendlval->value.str.len = yyleng; } zendlval->type = IS_STRING; HANDLE_NEWLINES(yytext, yyleng); return T_INLINE_HTML; } #line 1163 "Zend/zend_language_scanner.c" yy4: YYDEBUG(4, *YYCURSOR); yych = *++YYCURSOR; goto yy3; yy5: YYDEBUG(5, *YYCURSOR); yyaccept = 1; yych = *(YYMARKER = ++YYCURSOR); if (yych <= 'O') { if (yych == '=') goto yy45; } else { if (yych <= 'P') goto yy47; if (yych == 'p') goto yy47; } yy6: YYDEBUG(6, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1780 "Zend/zend_language_scanner.l" { if (CG(short_tags)) { zendlval->value.str.val = yytext; /* no copying - intentional */ zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; BEGIN(ST_IN_SCRIPTING); return T_OPEN_TAG; } else { goto inline_char_handler; } } #line 1193 "Zend/zend_language_scanner.c" yy7: YYDEBUG(7, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) == '=') goto yy43; YYDEBUG(8, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1757 "Zend/zend_language_scanner.l" { if (CG(asp_tags)) { zendlval->value.str.val = yytext; /* no copying - intentional */ zendlval->value.str.len = yyleng; 
zendlval->type = IS_STRING; BEGIN(ST_IN_SCRIPTING); return T_OPEN_TAG; } else { goto inline_char_handler; } } #line 1212 "Zend/zend_language_scanner.c" yy9: YYDEBUG(9, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy11; if (yych == 'c') goto yy11; yy10: YYDEBUG(10, *YYCURSOR); YYCURSOR = YYMARKER; if (yyaccept <= 0) { goto yy3; } else { goto yy6; } yy11: YYDEBUG(11, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy12; if (yych != 'r') goto yy10; yy12: YYDEBUG(12, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy13; if (yych != 'i') goto yy10; yy13: YYDEBUG(13, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy14; if (yych != 'p') goto yy10; yy14: YYDEBUG(14, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy15; if (yych != 't') goto yy10; yy15: YYDEBUG(15, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy10; if (yych == 'l') goto yy10; goto yy17; yy16: YYDEBUG(16, *YYCURSOR); ++YYCURSOR; YYFILL(8); yych = *YYCURSOR; yy17: YYDEBUG(17, *YYCURSOR); if (yybm[0+yych] & 128) { goto yy16; } if (yych == 'L') goto yy18; if (yych != 'l') goto yy10; yy18: YYDEBUG(18, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy19; if (yych != 'a') goto yy10; yy19: YYDEBUG(19, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy20; if (yych != 'n') goto yy10; yy20: YYDEBUG(20, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'G') goto yy21; if (yych != 'g') goto yy10; yy21: YYDEBUG(21, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'U') goto yy22; if (yych != 'u') goto yy10; yy22: YYDEBUG(22, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy23; if (yych != 'a') goto yy10; yy23: YYDEBUG(23, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'G') goto yy24; if (yych != 'g') goto yy10; yy24: YYDEBUG(24, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy25; if (yych != 'e') goto yy10; yy25: YYDEBUG(25, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(26, *YYCURSOR); if (yych <= '\r') { if (yych <= 0x08) goto yy10; if (yych <= '\n') 
goto yy25; if (yych <= '\f') goto yy10; goto yy25; } else { if (yych <= ' ') { if (yych <= 0x1F) goto yy10; goto yy25; } else { if (yych != '=') goto yy10; } } yy27: YYDEBUG(27, *YYCURSOR); ++YYCURSOR; YYFILL(5); yych = *YYCURSOR; YYDEBUG(28, *YYCURSOR); if (yych <= '!') { if (yych <= '\f') { if (yych <= 0x08) goto yy10; if (yych <= '\n') goto yy27; goto yy10; } else { if (yych <= '\r') goto yy27; if (yych == ' ') goto yy27; goto yy10; } } else { if (yych <= 'O') { if (yych <= '"') goto yy30; if (yych == '\'') goto yy31; goto yy10; } else { if (yych <= 'P') goto yy29; if (yych != 'p') goto yy10; } } yy29: YYDEBUG(29, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy42; if (yych == 'h') goto yy42; goto yy10; yy30: YYDEBUG(30, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy39; if (yych == 'p') goto yy39; goto yy10; yy31: YYDEBUG(31, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy32; if (yych != 'p') goto yy10; yy32: YYDEBUG(32, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy33; if (yych != 'h') goto yy10; yy33: YYDEBUG(33, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy34; if (yych != 'p') goto yy10; yy34: YYDEBUG(34, *YYCURSOR); yych = *++YYCURSOR; if (yych != '\'') goto yy10; yy35: YYDEBUG(35, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(36, *YYCURSOR); if (yych <= '\r') { if (yych <= 0x08) goto yy10; if (yych <= '\n') goto yy35; if (yych <= '\f') goto yy10; goto yy35; } else { if (yych <= ' ') { if (yych <= 0x1F) goto yy10; goto yy35; } else { if (yych != '>') goto yy10; } } YYDEBUG(37, *YYCURSOR); ++YYCURSOR; YYDEBUG(38, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1717 "Zend/zend_language_scanner.l" { YYCTYPE *bracket = (YYCTYPE*)zend_memrchr(yytext, '<', yyleng - (sizeof("script language=php>") - 1)); if (bracket != SCNG(yy_text)) { /* Handle previously scanned HTML, as possible <script> tags found are assumed to not be PHP's */ YYCURSOR = bracket; goto inline_html; } HANDLE_NEWLINES(yytext, 
yyleng); zendlval->value.str.val = yytext; /* no copying - intentional */ zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; BEGIN(ST_IN_SCRIPTING); return T_OPEN_TAG; } #line 1415 "Zend/zend_language_scanner.c" yy39: YYDEBUG(39, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy40; if (yych != 'h') goto yy10; yy40: YYDEBUG(40, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy41; if (yych != 'p') goto yy10; yy41: YYDEBUG(41, *YYCURSOR); yych = *++YYCURSOR; if (yych == '"') goto yy35; goto yy10; yy42: YYDEBUG(42, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy35; if (yych == 'p') goto yy35; goto yy10; yy43: YYDEBUG(43, *YYCURSOR); ++YYCURSOR; YYDEBUG(44, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1735 "Zend/zend_language_scanner.l" { if (CG(asp_tags)) { zendlval->value.str.val = yytext; /* no copying - intentional */ zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; BEGIN(ST_IN_SCRIPTING); return T_OPEN_TAG_WITH_ECHO; } else { goto inline_char_handler; } } #line 1454 "Zend/zend_language_scanner.c" yy45: YYDEBUG(45, *YYCURSOR); ++YYCURSOR; YYDEBUG(46, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1748 "Zend/zend_language_scanner.l" { zendlval->value.str.val = yytext; /* no copying - intentional */ zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; BEGIN(ST_IN_SCRIPTING); return T_OPEN_TAG_WITH_ECHO; } #line 1468 "Zend/zend_language_scanner.c" yy47: YYDEBUG(47, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy48; if (yych != 'h') goto yy10; yy48: YYDEBUG(48, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy49; if (yych != 'p') goto yy10; yy49: YYDEBUG(49, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '\f') { if (yych <= 0x08) goto yy10; if (yych >= '\v') goto yy10; } else { if (yych <= '\r') goto yy52; if (yych != ' ') goto yy10; } yy50: YYDEBUG(50, *YYCURSOR); ++YYCURSOR; yy51: YYDEBUG(51, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1770 "Zend/zend_language_scanner.l" { 
zendlval->value.str.val = yytext; /* no copying - intentional */ zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; HANDLE_NEWLINE(yytext[yyleng-1]); BEGIN(ST_IN_SCRIPTING); return T_OPEN_TAG; } #line 1504 "Zend/zend_language_scanner.c" yy52: YYDEBUG(52, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) == '\n') goto yy50; goto yy51; } /* *********************************** */ yyc_ST_BACKQUOTE: { static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, }; YYDEBUG(53, *YYCURSOR); YYFILL(2); yych = *YYCURSOR; if (yych <= '_') { if (yych != '$') goto yy60; } else { if (yych <= '`') goto yy58; if (yych == '{') goto yy57; goto yy60; } YYDEBUG(55, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '_') { if (yych <= '@') goto yy56; if (yych <= 'Z') goto yy63; if (yych >= '_') goto yy63; } else { if (yych <= 'z') { if (yych >= 'a') goto yy63; } else { if 
(yych <= '{') goto yy66; if (yych >= 0x7F) goto yy63; } } yy56: YYDEBUG(56, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2243 "Zend/zend_language_scanner.l" { if (YYCURSOR > YYLIMIT) { return 0; } if (yytext[0] == '\\' && YYCURSOR < YYLIMIT) { YYCURSOR++; } while (YYCURSOR < YYLIMIT) { switch (*YYCURSOR++) { case '`': break; case '$': if (IS_LABEL_START(*YYCURSOR) || *YYCURSOR == '{') { break; } continue; case '{': if (*YYCURSOR == '$') { break; } continue; case '\\': if (YYCURSOR < YYLIMIT) { YYCURSOR++; } /* fall through */ default: continue; } YYCURSOR--; break; } yyleng = YYCURSOR - SCNG(yy_text); zend_scan_escape_string(zendlval, yytext, yyleng, '`' TSRMLS_CC); return T_ENCAPSED_AND_WHITESPACE; } #line 1616 "Zend/zend_language_scanner.c" yy57: YYDEBUG(57, *YYCURSOR); yych = *++YYCURSOR; if (yych == '$') goto yy61; goto yy56; yy58: YYDEBUG(58, *YYCURSOR); ++YYCURSOR; YYDEBUG(59, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2187 "Zend/zend_language_scanner.l" { BEGIN(ST_IN_SCRIPTING); return '`'; } #line 1632 "Zend/zend_language_scanner.c" yy60: YYDEBUG(60, *YYCURSOR); yych = *++YYCURSOR; goto yy56; yy61: YYDEBUG(61, *YYCURSOR); ++YYCURSOR; YYDEBUG(62, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2174 "Zend/zend_language_scanner.l" { zendlval->value.lval = (long) '{'; yy_push_state(ST_IN_SCRIPTING TSRMLS_CC); yyless(1); return T_CURLY_OPEN; } #line 1649 "Zend/zend_language_scanner.c" yy63: YYDEBUG(63, *YYCURSOR); yyaccept = 0; YYMARKER = ++YYCURSOR; YYFILL(3); yych = *YYCURSOR; YYDEBUG(64, *YYCURSOR); if (yybm[0+yych] & 128) { goto yy63; } if (yych == '-') goto yy68; if (yych == '[') goto yy70; yy65: YYDEBUG(65, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1874 "Zend/zend_language_scanner.l" { zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 1671 "Zend/zend_language_scanner.c" yy66: YYDEBUG(66, *YYCURSOR); ++YYCURSOR; YYDEBUG(67, *YYCURSOR); yyleng = YYCURSOR - 
SCNG(yy_text); #line 1451 "Zend/zend_language_scanner.l" { yy_push_state(ST_LOOKING_FOR_VARNAME TSRMLS_CC); return T_DOLLAR_OPEN_CURLY_BRACES; } #line 1682 "Zend/zend_language_scanner.c" yy68: YYDEBUG(68, *YYCURSOR); yych = *++YYCURSOR; if (yych == '>') goto yy72; yy69: YYDEBUG(69, *YYCURSOR); YYCURSOR = YYMARKER; goto yy65; yy70: YYDEBUG(70, *YYCURSOR); ++YYCURSOR; YYDEBUG(71, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1866 "Zend/zend_language_scanner.l" { yyless(yyleng - 1); yy_push_state(ST_VAR_OFFSET TSRMLS_CC); zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 1704 "Zend/zend_language_scanner.c" yy72: YYDEBUG(72, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '_') { if (yych <= '@') goto yy69; if (yych <= 'Z') goto yy73; if (yych <= '^') goto yy69; } else { if (yych <= '`') goto yy69; if (yych <= 'z') goto yy73; if (yych <= '~') goto yy69; } yy73: YYDEBUG(73, *YYCURSOR); ++YYCURSOR; YYDEBUG(74, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1856 "Zend/zend_language_scanner.l" { yyless(yyleng - 3); yy_push_state(ST_LOOKING_FOR_PROPERTY TSRMLS_CC); zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 1730 "Zend/zend_language_scanner.c" } /* *********************************** */ yyc_ST_DOUBLE_QUOTES: { static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, }; YYDEBUG(75, *YYCURSOR); YYFILL(2); yych = *YYCURSOR; if (yych <= '#') { if (yych == '"') goto yy80; goto yy82; } else { if (yych <= '$') goto yy77; if (yych == '{') goto yy79; goto yy82; } yy77: YYDEBUG(77, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '_') { if (yych <= '@') goto yy78; if (yych <= 'Z') goto yy85; if (yych >= '_') goto yy85; } else { if (yych <= 'z') { if (yych >= 'a') goto yy85; } else { if (yych <= '{') goto yy88; if (yych >= 0x7F) goto yy85; } } yy78: YYDEBUG(78, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2193 "Zend/zend_language_scanner.l" { if (GET_DOUBLE_QUOTES_SCANNED_LENGTH()) { YYCURSOR += GET_DOUBLE_QUOTES_SCANNED_LENGTH() - 1; SET_DOUBLE_QUOTES_SCANNED_LENGTH(0); goto double_quotes_scan_done; } if (YYCURSOR > YYLIMIT) { return 0; } if (yytext[0] == '\\' && YYCURSOR < YYLIMIT) { YYCURSOR++; } while (YYCURSOR < YYLIMIT) { switch (*YYCURSOR++) { case '"': break; case '$': if (IS_LABEL_START(*YYCURSOR) || *YYCURSOR == '{') { break; } continue; case '{': if (*YYCURSOR == '$') { break; } continue; case '\\': if (YYCURSOR < YYLIMIT) { YYCURSOR++; } /* fall through */ default: continue; } YYCURSOR--; break; } double_quotes_scan_done: yyleng = YYCURSOR - SCNG(yy_text); zend_scan_escape_string(zendlval, yytext, yyleng, '"' TSRMLS_CC); return T_ENCAPSED_AND_WHITESPACE; } #line 1847 "Zend/zend_language_scanner.c" yy79: YYDEBUG(79, *YYCURSOR); yych = *++YYCURSOR; if (yych == '$') goto yy83; goto yy78; 
yy80: YYDEBUG(80, *YYCURSOR); ++YYCURSOR; YYDEBUG(81, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2182 "Zend/zend_language_scanner.l" { BEGIN(ST_IN_SCRIPTING); return '"'; } #line 1863 "Zend/zend_language_scanner.c" yy82: YYDEBUG(82, *YYCURSOR); yych = *++YYCURSOR; goto yy78; yy83: YYDEBUG(83, *YYCURSOR); ++YYCURSOR; YYDEBUG(84, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2174 "Zend/zend_language_scanner.l" { zendlval->value.lval = (long) '{'; yy_push_state(ST_IN_SCRIPTING TSRMLS_CC); yyless(1); return T_CURLY_OPEN; } #line 1880 "Zend/zend_language_scanner.c" yy85: YYDEBUG(85, *YYCURSOR); yyaccept = 0; YYMARKER = ++YYCURSOR; YYFILL(3); yych = *YYCURSOR; YYDEBUG(86, *YYCURSOR); if (yybm[0+yych] & 128) { goto yy85; } if (yych == '-') goto yy90; if (yych == '[') goto yy92; yy87: YYDEBUG(87, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1874 "Zend/zend_language_scanner.l" { zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 1902 "Zend/zend_language_scanner.c" yy88: YYDEBUG(88, *YYCURSOR); ++YYCURSOR; YYDEBUG(89, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1451 "Zend/zend_language_scanner.l" { yy_push_state(ST_LOOKING_FOR_VARNAME TSRMLS_CC); return T_DOLLAR_OPEN_CURLY_BRACES; } #line 1913 "Zend/zend_language_scanner.c" yy90: YYDEBUG(90, *YYCURSOR); yych = *++YYCURSOR; if (yych == '>') goto yy94; yy91: YYDEBUG(91, *YYCURSOR); YYCURSOR = YYMARKER; goto yy87; yy92: YYDEBUG(92, *YYCURSOR); ++YYCURSOR; YYDEBUG(93, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1866 "Zend/zend_language_scanner.l" { yyless(yyleng - 1); yy_push_state(ST_VAR_OFFSET TSRMLS_CC); zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 1935 "Zend/zend_language_scanner.c" yy94: YYDEBUG(94, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '_') { if (yych <= '@') goto yy91; if (yych <= 'Z') goto yy95; if (yych <= '^') goto yy91; } else { if (yych <= '`') goto yy91; 
if (yych <= 'z') goto yy95; if (yych <= '~') goto yy91; } yy95: YYDEBUG(95, *YYCURSOR); ++YYCURSOR; YYDEBUG(96, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1856 "Zend/zend_language_scanner.l" { yyless(yyleng - 3); yy_push_state(ST_LOOKING_FOR_PROPERTY TSRMLS_CC); zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 1961 "Zend/zend_language_scanner.c" } /* *********************************** */ yyc_ST_END_HEREDOC: YYDEBUG(97, *YYCURSOR); YYFILL(1); yych = *YYCURSOR; YYDEBUG(99, *YYCURSOR); ++YYCURSOR; YYDEBUG(100, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2161 "Zend/zend_language_scanner.l" { YYCURSOR += CG(heredoc_len) - 1; yyleng = CG(heredoc_len); Z_STRVAL_P(zendlval) = CG(heredoc); Z_STRLEN_P(zendlval) = CG(heredoc_len); CG(heredoc) = NULL; CG(heredoc_len) = 0; BEGIN(ST_IN_SCRIPTING); return T_END_HEREDOC; } #line 1984 "Zend/zend_language_scanner.c" /* *********************************** */ yyc_ST_HEREDOC: { static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, }; YYDEBUG(101, *YYCURSOR); YYFILL(2); yych = *YYCURSOR; if (yych == '$') goto yy103; if (yych == '{') goto yy105; goto yy106; yy103: YYDEBUG(103, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '_') { if (yych <= '@') goto yy104; if (yych <= 'Z') goto yy109; if (yych >= '_') goto yy109; } else { if (yych <= 'z') { if (yych >= 'a') goto yy109; } else { if (yych <= '{') goto yy112; if (yych >= 0x7F) goto yy109; } } yy104: YYDEBUG(104, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2285 "Zend/zend_language_scanner.l" { int newline = 0; if (YYCURSOR > YYLIMIT) { return 0; } YYCURSOR--; while (YYCURSOR < YYLIMIT) { switch (*YYCURSOR++) { case '\r': if (*YYCURSOR == '\n') { YYCURSOR++; } /* fall through */ case '\n': /* Check for ending label on the next line */ if (IS_LABEL_START(*YYCURSOR) && CG(heredoc_len) < YYLIMIT - YYCURSOR && !memcmp(YYCURSOR, CG(heredoc), CG(heredoc_len))) { YYCTYPE *end = YYCURSOR + CG(heredoc_len); if (*end == ';') { end++; } if (*end == '\n' || *end == '\r') { /* newline before label will be subtracted from returned text, but * yyleng/yytext will include it, for zend_highlight/strip, tokenizer, etc. 
*/ if (YYCURSOR[-2] == '\r' && YYCURSOR[-1] == '\n') { newline = 2; /* Windows newline */ } else { newline = 1; } CG(increment_lineno) = 1; /* For newline before label */ BEGIN(ST_END_HEREDOC); goto heredoc_scan_done; } } continue; case '$': if (IS_LABEL_START(*YYCURSOR) || *YYCURSOR == '{') { break; } continue; case '{': if (*YYCURSOR == '$') { break; } continue; case '\\': if (YYCURSOR < YYLIMIT && *YYCURSOR != '\n' && *YYCURSOR != '\r') { YYCURSOR++; } /* fall through */ default: continue; } YYCURSOR--; break; } heredoc_scan_done: yyleng = YYCURSOR - SCNG(yy_text); zend_scan_escape_string(zendlval, yytext, yyleng - newline, 0 TSRMLS_CC); return T_ENCAPSED_AND_WHITESPACE; } #line 2117 "Zend/zend_language_scanner.c" yy105: YYDEBUG(105, *YYCURSOR); yych = *++YYCURSOR; if (yych == '$') goto yy107; goto yy104; yy106: YYDEBUG(106, *YYCURSOR); yych = *++YYCURSOR; goto yy104; yy107: YYDEBUG(107, *YYCURSOR); ++YYCURSOR; YYDEBUG(108, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2174 "Zend/zend_language_scanner.l" { zendlval->value.lval = (long) '{'; yy_push_state(ST_IN_SCRIPTING TSRMLS_CC); yyless(1); return T_CURLY_OPEN; } #line 2139 "Zend/zend_language_scanner.c" yy109: YYDEBUG(109, *YYCURSOR); yyaccept = 0; YYMARKER = ++YYCURSOR; YYFILL(3); yych = *YYCURSOR; YYDEBUG(110, *YYCURSOR); if (yybm[0+yych] & 128) { goto yy109; } if (yych == '-') goto yy114; if (yych == '[') goto yy116; yy111: YYDEBUG(111, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1874 "Zend/zend_language_scanner.l" { zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 2161 "Zend/zend_language_scanner.c" yy112: YYDEBUG(112, *YYCURSOR); ++YYCURSOR; YYDEBUG(113, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1451 "Zend/zend_language_scanner.l" { yy_push_state(ST_LOOKING_FOR_VARNAME TSRMLS_CC); return T_DOLLAR_OPEN_CURLY_BRACES; } #line 2172 "Zend/zend_language_scanner.c" yy114: YYDEBUG(114, *YYCURSOR); yych = *++YYCURSOR; if (yych == 
'>') goto yy118; yy115: YYDEBUG(115, *YYCURSOR); YYCURSOR = YYMARKER; goto yy111; yy116: YYDEBUG(116, *YYCURSOR); ++YYCURSOR; YYDEBUG(117, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1866 "Zend/zend_language_scanner.l" { yyless(yyleng - 1); yy_push_state(ST_VAR_OFFSET TSRMLS_CC); zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 2194 "Zend/zend_language_scanner.c" yy118: YYDEBUG(118, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '_') { if (yych <= '@') goto yy115; if (yych <= 'Z') goto yy119; if (yych <= '^') goto yy115; } else { if (yych <= '`') goto yy115; if (yych <= 'z') goto yy119; if (yych <= '~') goto yy115; } yy119: YYDEBUG(119, *YYCURSOR); ++YYCURSOR; YYDEBUG(120, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1856 "Zend/zend_language_scanner.l" { yyless(yyleng - 3); yy_push_state(ST_LOOKING_FOR_PROPERTY TSRMLS_CC); zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 2220 "Zend/zend_language_scanner.c" } /* *********************************** */ yyc_ST_IN_SCRIPTING: { static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 64, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 60, 60, 44, 44, 44, 44, 44, 44, 44, 44, 0, 0, 0, 0, 0, 0, 0, 36, 36, 36, 36, 36, 36, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 4, 0, 36, 36, 36, 36, 36, 36, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 0, 0, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, }; YYDEBUG(121, *YYCURSOR); YYFILL(16); yych = 
*YYCURSOR; YYDEBUG(-1, yych); switch (yych) { case 0x00: case 0x01: case 0x02: case 0x03: case 0x04: case 0x05: case 0x06: case 0x07: case 0x08: case '\v': case '\f': case 0x0E: case 0x0F: case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17: case 0x18: case 0x19: case 0x1A: case 0x1B: case 0x1C: case 0x1D: case 0x1E: case 0x1F: goto yy183; case '\t': case '\n': case '\r': case ' ': goto yy139; case '!': goto yy152; case '"': goto yy179; case '#': goto yy175; case '$': goto yy164; case '%': goto yy158; case '&': goto yy159; case '\'': goto yy177; case '(': goto yy146; case ')': case ',': case ';': case '@': case '[': case ']': case '~': goto yy165; case '*': goto yy155; case '+': goto yy151; case '-': goto yy137; case '.': goto yy157; case '/': goto yy156; case '0': goto yy171; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': goto yy173; case ':': goto yy141; case '<': goto yy153; case '=': goto yy149; case '>': goto yy154; case '?': goto yy166; case 'A': case 'a': goto yy132; case 'B': case 'b': goto yy134; case 'C': case 'c': goto yy127; case 'D': case 'd': goto yy125; case 'E': case 'e': goto yy123; case 'F': case 'f': goto yy126; case 'G': case 'g': goto yy135; case 'I': case 'i': goto yy130; case 'L': case 'l': goto yy150; case 'N': case 'n': goto yy144; case 'O': case 'o': goto yy162; case 'P': case 'p': goto yy136; case 'R': case 'r': goto yy128; case 'S': case 's': goto yy133; case 'T': case 't': goto yy129; case 'U': case 'u': goto yy147; case 'V': case 'v': goto yy145; case 'W': case 'w': goto yy131; case 'X': case 'x': goto yy163; case '\\': goto yy142; case '^': goto yy161; case '_': goto yy148; case '`': goto yy181; case '{': goto yy167; case '|': goto yy160; case '}': goto yy169; default: goto yy174; } yy123: YYDEBUG(123, *YYCURSOR); ++YYCURSOR; YYDEBUG(-1, yych); switch ((yych = *YYCURSOR)) { case 'C': case 'c': goto yy726; case 'L': case 'l': goto yy727; case 'M': case 'm': 
goto yy728; case 'N': case 'n': goto yy729; case 'V': case 'v': goto yy730; case 'X': case 'x': goto yy731; default: goto yy186; } yy124: YYDEBUG(124, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1897 "Zend/zend_language_scanner.l" { zend_copy_value(zendlval, yytext, yyleng); zendlval->type = IS_STRING; return T_STRING; } #line 2407 "Zend/zend_language_scanner.c" yy125: YYDEBUG(125, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'O') { if (yych <= 'H') { if (yych == 'E') goto yy708; goto yy186; } else { if (yych <= 'I') goto yy709; if (yych <= 'N') goto yy186; goto yy710; } } else { if (yych <= 'h') { if (yych == 'e') goto yy708; goto yy186; } else { if (yych <= 'i') goto yy709; if (yych == 'o') goto yy710; goto yy186; } } yy126: YYDEBUG(126, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'U') { if (yych <= 'N') { if (yych == 'I') goto yy687; goto yy186; } else { if (yych <= 'O') goto yy688; if (yych <= 'T') goto yy186; goto yy689; } } else { if (yych <= 'n') { if (yych == 'i') goto yy687; goto yy186; } else { if (yych <= 'o') goto yy688; if (yych == 'u') goto yy689; goto yy186; } } yy127: YYDEBUG(127, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'O') { if (yych <= 'K') { if (yych == 'A') goto yy652; goto yy186; } else { if (yych <= 'L') goto yy653; if (yych <= 'N') goto yy186; goto yy654; } } else { if (yych <= 'k') { if (yych == 'a') goto yy652; goto yy186; } else { if (yych <= 'l') goto yy653; if (yych == 'o') goto yy654; goto yy186; } } yy128: YYDEBUG(128, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy634; if (yych == 'e') goto yy634; goto yy186; yy129: YYDEBUG(129, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'R') { if (yych == 'H') goto yy622; if (yych <= 'Q') goto yy186; goto yy623; } else { if (yych <= 'h') { if (yych <= 'g') goto yy186; goto yy622; } else { if (yych == 'r') goto yy623; goto yy186; } } yy130: YYDEBUG(130, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'S') { if (yych <= 'L') { if (yych == 'F') goto yy569; goto yy186; } else { if 
(yych <= 'M') goto yy571; if (yych <= 'N') goto yy572; if (yych <= 'R') goto yy186; goto yy573; } } else { if (yych <= 'm') { if (yych == 'f') goto yy569; if (yych <= 'l') goto yy186; goto yy571; } else { if (yych <= 'n') goto yy572; if (yych == 's') goto yy573; goto yy186; } } yy131: YYDEBUG(131, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy564; if (yych == 'h') goto yy564; goto yy186; yy132: YYDEBUG(132, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'S') { if (yych <= 'M') { if (yych == 'B') goto yy546; goto yy186; } else { if (yych <= 'N') goto yy547; if (yych <= 'Q') goto yy186; if (yych <= 'R') goto yy548; goto yy549; } } else { if (yych <= 'n') { if (yych == 'b') goto yy546; if (yych <= 'm') goto yy186; goto yy547; } else { if (yych <= 'q') goto yy186; if (yych <= 'r') goto yy548; if (yych <= 's') goto yy549; goto yy186; } } yy133: YYDEBUG(133, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'W') { if (yych == 'T') goto yy534; if (yych <= 'V') goto yy186; goto yy535; } else { if (yych <= 't') { if (yych <= 's') goto yy186; goto yy534; } else { if (yych == 'w') goto yy535; goto yy186; } } yy134: YYDEBUG(134, *YYCURSOR); yyaccept = 0; yych = *(YYMARKER = ++YYCURSOR); if (yych <= ';') { if (yych <= '"') { if (yych <= '!') goto yy186; goto yy526; } else { if (yych == '\'') goto yy527; goto yy186; } } else { if (yych <= 'R') { if (yych <= '<') goto yy525; if (yych <= 'Q') goto yy186; goto yy528; } else { if (yych == 'r') goto yy528; goto yy186; } } yy135: YYDEBUG(135, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'O') { if (yych == 'L') goto yy515; if (yych <= 'N') goto yy186; goto yy516; } else { if (yych <= 'l') { if (yych <= 'k') goto yy186; goto yy515; } else { if (yych == 'o') goto yy516; goto yy186; } } yy136: YYDEBUG(136, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'U') { if (yych == 'R') goto yy491; if (yych <= 'T') goto yy186; goto yy492; } else { if (yych <= 'r') { if (yych <= 'q') goto yy186; goto yy491; } else { if (yych == 'u') goto yy492; goto 
yy186; } } yy137: YYDEBUG(137, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '<') { if (yych == '-') goto yy487; } else { if (yych <= '=') goto yy485; if (yych <= '>') goto yy489; } yy138: YYDEBUG(138, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1440 "Zend/zend_language_scanner.l" { return yytext[0]; } #line 2637 "Zend/zend_language_scanner.c" yy139: YYDEBUG(139, *YYCURSOR); ++YYCURSOR; yych = *YYCURSOR; goto yy484; yy140: YYDEBUG(140, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1171 "Zend/zend_language_scanner.l" { zendlval->value.str.val = yytext; /* no copying - intentional */ zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; HANDLE_NEWLINES(yytext, yyleng); return T_WHITESPACE; } #line 2654 "Zend/zend_language_scanner.c" yy141: YYDEBUG(141, *YYCURSOR); yych = *++YYCURSOR; if (yych == ':') goto yy481; goto yy138; yy142: YYDEBUG(142, *YYCURSOR); ++YYCURSOR; YYDEBUG(143, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1200 "Zend/zend_language_scanner.l" { return T_NS_SEPARATOR; } #line 2669 "Zend/zend_language_scanner.c" yy144: YYDEBUG(144, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'E') { if (yych == 'A') goto yy469; if (yych <= 'D') goto yy186; goto yy470; } else { if (yych <= 'a') { if (yych <= '`') goto yy186; goto yy469; } else { if (yych == 'e') goto yy470; goto yy186; } } yy145: YYDEBUG(145, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy466; if (yych == 'a') goto yy466; goto yy186; yy146: YYDEBUG(146, *YYCURSOR); yyaccept = 1; yych = *(YYMARKER = ++YYCURSOR); if (yych <= 'S') { if (yych <= 'D') { if (yych <= ' ') { if (yych == '\t') goto yy391; if (yych <= 0x1F) goto yy138; goto yy391; } else { if (yych <= '@') goto yy138; if (yych == 'C') goto yy138; goto yy391; } } else { if (yych <= 'I') { if (yych == 'F') goto yy391; if (yych <= 'H') goto yy138; goto yy391; } else { if (yych == 'O') goto yy391; if (yych <= 'Q') goto yy138; goto yy391; } } } else { if (yych <= 'f') { if (yych <= 'b') { if (yych == 'U') 
goto yy391; if (yych <= '`') goto yy138; goto yy391; } else { if (yych == 'd') goto yy391; if (yych <= 'e') goto yy138; goto yy391; } } else { if (yych <= 'o') { if (yych == 'i') goto yy391; if (yych <= 'n') goto yy138; goto yy391; } else { if (yych <= 's') { if (yych <= 'q') goto yy138; goto yy391; } else { if (yych == 'u') goto yy391; goto yy138; } } } } yy147: YYDEBUG(147, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'S') { if (yych == 'N') goto yy382; if (yych <= 'R') goto yy186; goto yy383; } else { if (yych <= 'n') { if (yych <= 'm') goto yy186; goto yy382; } else { if (yych == 's') goto yy383; goto yy186; } } yy148: YYDEBUG(148, *YYCURSOR); yych = *++YYCURSOR; if (yych == '_') goto yy300; goto yy186; yy149: YYDEBUG(149, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '<') goto yy138; if (yych <= '=') goto yy294; if (yych <= '>') goto yy296; goto yy138; yy150: YYDEBUG(150, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy290; if (yych == 'i') goto yy290; goto yy186; yy151: YYDEBUG(151, *YYCURSOR); yych = *++YYCURSOR; if (yych == '+') goto yy288; if (yych == '=') goto yy286; goto yy138; yy152: YYDEBUG(152, *YYCURSOR); yych = *++YYCURSOR; if (yych == '=') goto yy283; goto yy138; yy153: YYDEBUG(153, *YYCURSOR); yyaccept = 1; yych = *(YYMARKER = ++YYCURSOR); if (yych <= ';') { if (yych == '/') goto yy255; goto yy138; } else { if (yych <= '<') goto yy253; if (yych <= '=') goto yy256; if (yych <= '>') goto yy258; goto yy138; } yy154: YYDEBUG(154, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '<') goto yy138; if (yych <= '=') goto yy249; if (yych <= '>') goto yy247; goto yy138; yy155: YYDEBUG(155, *YYCURSOR); yych = *++YYCURSOR; if (yych == '=') goto yy245; goto yy138; yy156: YYDEBUG(156, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '.') { if (yych == '*') goto yy237; goto yy138; } else { if (yych <= '/') goto yy239; if (yych == '=') goto yy240; goto yy138; } yy157: YYDEBUG(157, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '/') goto yy138; if (yych <= '9') goto yy233; 
if (yych == '=') goto yy235; goto yy138; yy158: YYDEBUG(158, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '<') goto yy138; if (yych <= '=') goto yy229; if (yych <= '>') goto yy227; goto yy138; yy159: YYDEBUG(159, *YYCURSOR); yych = *++YYCURSOR; if (yych == '&') goto yy223; if (yych == '=') goto yy225; goto yy138; yy160: YYDEBUG(160, *YYCURSOR); yych = *++YYCURSOR; if (yych == '=') goto yy221; if (yych == '|') goto yy219; goto yy138; yy161: YYDEBUG(161, *YYCURSOR); yych = *++YYCURSOR; if (yych == '=') goto yy217; goto yy138; yy162: YYDEBUG(162, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy215; if (yych == 'r') goto yy215; goto yy186; yy163: YYDEBUG(163, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy212; if (yych == 'o') goto yy212; goto yy186; yy164: YYDEBUG(164, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '_') { if (yych <= '@') goto yy138; if (yych <= 'Z') goto yy209; if (yych <= '^') goto yy138; goto yy209; } else { if (yych <= '`') goto yy138; if (yych <= 'z') goto yy209; if (yych <= '~') goto yy138; goto yy209; } yy165: YYDEBUG(165, *YYCURSOR); yych = *++YYCURSOR; goto yy138; yy166: YYDEBUG(166, *YYCURSOR); yych = *++YYCURSOR; if (yych == '>') goto yy205; goto yy138; yy167: YYDEBUG(167, *YYCURSOR); ++YYCURSOR; YYDEBUG(168, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1445 "Zend/zend_language_scanner.l" { yy_push_state(ST_IN_SCRIPTING TSRMLS_CC); return '{'; } #line 2902 "Zend/zend_language_scanner.c" yy169: YYDEBUG(169, *YYCURSOR); ++YYCURSOR; YYDEBUG(170, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1457 "Zend/zend_language_scanner.l" { RESET_DOC_COMMENT(); if (!zend_stack_is_empty(&SCNG(state_stack))) { yy_pop_state(TSRMLS_C); } return '}'; } #line 2916 "Zend/zend_language_scanner.c" yy171: YYDEBUG(171, *YYCURSOR); yyaccept = 2; yych = *(YYMARKER = ++YYCURSOR); if (yych <= 'E') { if (yych <= '9') { if (yych == '.') goto yy187; if (yych >= '0') goto yy190; } else { if (yych == 'B') goto yy198; if (yych >= 'E') goto yy192; } 
} else { if (yych <= 'b') { if (yych == 'X') goto yy197; if (yych >= 'b') goto yy198; } else { if (yych <= 'e') { if (yych >= 'e') goto yy192; } else { if (yych == 'x') goto yy197; } } } yy172: YYDEBUG(172, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1507 "Zend/zend_language_scanner.l" { if (yyleng < MAX_LENGTH_OF_LONG - 1) { /* Won't overflow */ zendlval->value.lval = strtol(yytext, NULL, 0); } else { errno = 0; zendlval->value.lval = strtol(yytext, NULL, 0); if (errno == ERANGE) { /* Overflow */ if (yytext[0] == '0') { /* octal overflow */ zendlval->value.dval = zend_oct_strtod(yytext, NULL); } else { zendlval->value.dval = zend_strtod(yytext, NULL); } zendlval->type = IS_DOUBLE; return T_DNUMBER; } } zendlval->type = IS_LONG; return T_LNUMBER; } #line 2965 "Zend/zend_language_scanner.c" yy173: YYDEBUG(173, *YYCURSOR); yyaccept = 2; yych = *(YYMARKER = ++YYCURSOR); if (yych <= '9') { if (yych == '.') goto yy187; if (yych <= '/') goto yy172; goto yy190; } else { if (yych <= 'E') { if (yych <= 'D') goto yy172; goto yy192; } else { if (yych == 'e') goto yy192; goto yy172; } } yy174: YYDEBUG(174, *YYCURSOR); yych = *++YYCURSOR; goto yy186; yy175: YYDEBUG(175, *YYCURSOR); ++YYCURSOR; yy176: YYDEBUG(176, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1904 "Zend/zend_language_scanner.l" { while (YYCURSOR < YYLIMIT) { switch (*YYCURSOR++) { case '\r': if (*YYCURSOR == '\n') { YYCURSOR++; } /* fall through */ case '\n': CG(zend_lineno)++; break; case '%': if (!CG(asp_tags)) { continue; } /* fall through */ case '?': if (*YYCURSOR == '>') { YYCURSOR--; break; } /* fall through */ default: continue; } break; } yyleng = YYCURSOR - SCNG(yy_text); return T_COMMENT; } #line 3027 "Zend/zend_language_scanner.c" yy177: YYDEBUG(177, *YYCURSOR); ++YYCURSOR; yy178: YYDEBUG(178, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1995 "Zend/zend_language_scanner.l" { register char *s, *t; char *end; int bprefix = (yytext[0] != '\'') ? 
1 : 0; while (1) { if (YYCURSOR < YYLIMIT) { if (*YYCURSOR == '\'') { YYCURSOR++; yyleng = YYCURSOR - SCNG(yy_text); break; } else if (*YYCURSOR++ == '\\' && YYCURSOR < YYLIMIT) { YYCURSOR++; } } else { yyleng = YYLIMIT - SCNG(yy_text); /* Unclosed single quotes; treat similar to double quotes, but without a separate token * for ' (unrecognized by parser), instead of old flex fallback to "Unexpected character..." * rule, which continued in ST_IN_SCRIPTING state after the quote */ return T_ENCAPSED_AND_WHITESPACE; } } zendlval->value.str.val = estrndup(yytext+bprefix+1, yyleng-bprefix-2); zendlval->value.str.len = yyleng-bprefix-2; zendlval->type = IS_STRING; /* convert escape sequences */ s = t = zendlval->value.str.val; end = s+zendlval->value.str.len; while (s<end) { if (*s=='\\') { s++; switch(*s) { case '\\': case '\'': *t++ = *s; zendlval->value.str.len--; break; default: *t++ = '\\'; *t++ = *s; break; } } else { *t++ = *s; } if (*s == '\n' || (*s == '\r' && (*(s+1) != '\n'))) { CG(zend_lineno)++; } s++; } *t = 0; if (SCNG(output_filter)) { size_t sz = 0; s = zendlval->value.str.val; SCNG(output_filter)((unsigned char **)&(zendlval->value.str.val), &sz, (unsigned char *)s, (size_t)zendlval->value.str.len TSRMLS_CC); zendlval->value.str.len = sz; efree(s); } return T_CONSTANT_ENCAPSED_STRING; } #line 3102 "Zend/zend_language_scanner.c" yy179: YYDEBUG(179, *YYCURSOR); ++YYCURSOR; yy180: YYDEBUG(180, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2064 "Zend/zend_language_scanner.l" { int bprefix = (yytext[0] != '"') ? 
1 : 0; while (YYCURSOR < YYLIMIT) { switch (*YYCURSOR++) { case '"': yyleng = YYCURSOR - SCNG(yy_text); zend_scan_escape_string(zendlval, yytext+bprefix+1, yyleng-bprefix-2, '"' TSRMLS_CC); return T_CONSTANT_ENCAPSED_STRING; case '$': if (IS_LABEL_START(*YYCURSOR) || *YYCURSOR == '{') { break; } continue; case '{': if (*YYCURSOR == '$') { break; } continue; case '\\': if (YYCURSOR < YYLIMIT) { YYCURSOR++; } /* fall through */ default: continue; } YYCURSOR--; break; } /* Remember how much was scanned to save rescanning */ SET_DOUBLE_QUOTES_SCANNED_LENGTH(YYCURSOR - SCNG(yy_text) - yyleng); YYCURSOR = SCNG(yy_text) + yyleng; BEGIN(ST_DOUBLE_QUOTES); return '"'; } #line 3150 "Zend/zend_language_scanner.c" yy181: YYDEBUG(181, *YYCURSOR); ++YYCURSOR; YYDEBUG(182, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2155 "Zend/zend_language_scanner.l" { BEGIN(ST_BACKQUOTE); return '`'; } #line 3161 "Zend/zend_language_scanner.c" yy183: YYDEBUG(183, *YYCURSOR); ++YYCURSOR; YYDEBUG(184, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2413 "Zend/zend_language_scanner.l" { if (YYCURSOR > YYLIMIT) { return 0; } zend_error(E_COMPILE_WARNING,"Unexpected character in input: '%c' (ASCII=%d) state=%d", yytext[0], yytext[0], YYSTATE); goto restart; } #line 3176 "Zend/zend_language_scanner.c" yy185: YYDEBUG(185, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; yy186: YYDEBUG(186, *YYCURSOR); if (yybm[0+yych] & 4) { goto yy185; } goto yy124; yy187: YYDEBUG(187, *YYCURSOR); yyaccept = 3; YYMARKER = ++YYCURSOR; YYFILL(3); yych = *YYCURSOR; YYDEBUG(188, *YYCURSOR); if (yybm[0+yych] & 8) { goto yy187; } if (yych == 'E') goto yy192; if (yych == 'e') goto yy192; yy189: YYDEBUG(189, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1572 "Zend/zend_language_scanner.l" { zendlval->value.dval = zend_strtod(yytext, NULL); zendlval->type = IS_DOUBLE; return T_DNUMBER; } #line 3209 "Zend/zend_language_scanner.c" yy190: YYDEBUG(190, *YYCURSOR); yyaccept = 2; YYMARKER = ++YYCURSOR; 
YYFILL(3); yych = *YYCURSOR; YYDEBUG(191, *YYCURSOR); if (yych <= '9') { if (yych == '.') goto yy187; if (yych <= '/') goto yy172; goto yy190; } else { if (yych <= 'E') { if (yych <= 'D') goto yy172; } else { if (yych != 'e') goto yy172; } } yy192: YYDEBUG(192, *YYCURSOR); yych = *++YYCURSOR; if (yych <= ',') { if (yych == '+') goto yy194; } else { if (yych <= '-') goto yy194; if (yych <= '/') goto yy193; if (yych <= '9') goto yy195; } yy193: YYDEBUG(193, *YYCURSOR); YYCURSOR = YYMARKER; if (yyaccept <= 2) { if (yyaccept <= 1) { if (yyaccept <= 0) { goto yy124; } else { goto yy138; } } else { goto yy172; } } else { if (yyaccept <= 4) { if (yyaccept <= 3) { goto yy189; } else { goto yy238; } } else { goto yy254; } } yy194: YYDEBUG(194, *YYCURSOR); yych = *++YYCURSOR; if (yych <= '/') goto yy193; if (yych >= ':') goto yy193; yy195: YYDEBUG(195, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(196, *YYCURSOR); if (yych <= '/') goto yy189; if (yych <= '9') goto yy195; goto yy189; yy197: YYDEBUG(197, *YYCURSOR); yych = *++YYCURSOR; if (yybm[0+yych] & 32) { goto yy202; } goto yy193; yy198: YYDEBUG(198, *YYCURSOR); yych = *++YYCURSOR; if (yybm[0+yych] & 16) { goto yy199; } goto yy193; yy199: YYDEBUG(199, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(200, *YYCURSOR); if (yybm[0+yych] & 16) { goto yy199; } YYDEBUG(201, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1482 "Zend/zend_language_scanner.l" { char *bin = yytext + 2; /* Skip "0b" */ int len = yyleng - 2; /* Skip any leading 0s */ while (*bin == '0') { ++bin; --len; } if (len < SIZEOF_LONG * 8) { if (len == 0) { zendlval->value.lval = 0; } else { zendlval->value.lval = strtol(bin, NULL, 2); } zendlval->type = IS_LONG; return T_LNUMBER; } else { zendlval->value.dval = zend_bin_strtod(bin, NULL); zendlval->type = IS_DOUBLE; return T_DNUMBER; } } #line 3326 "Zend/zend_language_scanner.c" yy202: YYDEBUG(202, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(203, *YYCURSOR); if 
(yybm[0+yych] & 32) { goto yy202; } YYDEBUG(204, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1528 "Zend/zend_language_scanner.l" { char *hex = yytext + 2; /* Skip "0x" */ int len = yyleng - 2; /* Skip any leading 0s */ while (*hex == '0') { hex++; len--; } if (len < SIZEOF_LONG * 2 || (len == SIZEOF_LONG * 2 && *hex <= '7')) { if (len == 0) { zendlval->value.lval = 0; } else { zendlval->value.lval = strtol(hex, NULL, 16); } zendlval->type = IS_LONG; return T_LNUMBER; } else { zendlval->value.dval = zend_hex_strtod(hex, NULL); zendlval->type = IS_DOUBLE; return T_DNUMBER; } } #line 3363 "Zend/zend_language_scanner.c" yy205: YYDEBUG(205, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) == '\n') goto yy207; if (yych == '\r') goto yy208; yy206: YYDEBUG(206, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1972 "Zend/zend_language_scanner.l" { zendlval->value.str.val = yytext; /* no copying - intentional */ zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; BEGIN(INITIAL); return T_CLOSE_TAG; /* implicit ';' at php-end tag */ } #line 3380 "Zend/zend_language_scanner.c" yy207: YYDEBUG(207, *YYCURSOR); yych = *++YYCURSOR; goto yy206; yy208: YYDEBUG(208, *YYCURSOR); yych = *++YYCURSOR; if (yych == '\n') goto yy207; goto yy206; yy209: YYDEBUG(209, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(210, *YYCURSOR); if (yych <= '^') { if (yych <= '9') { if (yych >= '0') goto yy209; } else { if (yych <= '@') goto yy211; if (yych <= 'Z') goto yy209; } } else { if (yych <= '`') { if (yych <= '_') goto yy209; } else { if (yych <= 'z') goto yy209; if (yych >= 0x7F) goto yy209; } } yy211: YYDEBUG(211, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1874 "Zend/zend_language_scanner.l" { zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 3420 "Zend/zend_language_scanner.c" yy212: YYDEBUG(212, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy213; if (yych != 'r') goto yy186; yy213: 
YYDEBUG(213, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(214, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1428 "Zend/zend_language_scanner.l" { return T_LOGICAL_XOR; } #line 3438 "Zend/zend_language_scanner.c" yy215: YYDEBUG(215, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(216, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1420 "Zend/zend_language_scanner.l" { return T_LOGICAL_OR; } #line 3451 "Zend/zend_language_scanner.c" yy217: YYDEBUG(217, *YYCURSOR); ++YYCURSOR; YYDEBUG(218, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1408 "Zend/zend_language_scanner.l" { return T_XOR_EQUAL; } #line 3461 "Zend/zend_language_scanner.c" yy219: YYDEBUG(219, *YYCURSOR); ++YYCURSOR; YYDEBUG(220, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1412 "Zend/zend_language_scanner.l" { return T_BOOLEAN_OR; } #line 3471 "Zend/zend_language_scanner.c" yy221: YYDEBUG(221, *YYCURSOR); ++YYCURSOR; YYDEBUG(222, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1404 "Zend/zend_language_scanner.l" { return T_OR_EQUAL; } #line 3481 "Zend/zend_language_scanner.c" yy223: YYDEBUG(223, *YYCURSOR); ++YYCURSOR; YYDEBUG(224, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1416 "Zend/zend_language_scanner.l" { return T_BOOLEAN_AND; } #line 3491 "Zend/zend_language_scanner.c" yy225: YYDEBUG(225, *YYCURSOR); ++YYCURSOR; YYDEBUG(226, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1400 "Zend/zend_language_scanner.l" { return T_AND_EQUAL; } #line 3501 "Zend/zend_language_scanner.c" yy227: YYDEBUG(227, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) == '\n') goto yy231; if (yych == '\r') goto yy232; yy228: YYDEBUG(228, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1981 "Zend/zend_language_scanner.l" { if (CG(asp_tags)) { BEGIN(INITIAL); zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; zendlval->value.str.val = yytext; /* no copying - intentional */ return T_CLOSE_TAG; 
/* implicit ';' at php-end tag */ } else { yyless(1); return yytext[0]; } } #line 3523 "Zend/zend_language_scanner.c" yy229: YYDEBUG(229, *YYCURSOR); ++YYCURSOR; YYDEBUG(230, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1388 "Zend/zend_language_scanner.l" { return T_MOD_EQUAL; } #line 3533 "Zend/zend_language_scanner.c" yy231: YYDEBUG(231, *YYCURSOR); yych = *++YYCURSOR; goto yy228; yy232: YYDEBUG(232, *YYCURSOR); yych = *++YYCURSOR; if (yych == '\n') goto yy231; goto yy228; yy233: YYDEBUG(233, *YYCURSOR); yyaccept = 3; YYMARKER = ++YYCURSOR; YYFILL(3); yych = *YYCURSOR; YYDEBUG(234, *YYCURSOR); if (yych <= 'D') { if (yych <= '/') goto yy189; if (yych <= '9') goto yy233; goto yy189; } else { if (yych <= 'E') goto yy192; if (yych == 'e') goto yy192; goto yy189; } yy235: YYDEBUG(235, *YYCURSOR); ++YYCURSOR; YYDEBUG(236, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1384 "Zend/zend_language_scanner.l" { return T_CONCAT_EQUAL; } #line 3568 "Zend/zend_language_scanner.c" yy237: YYDEBUG(237, *YYCURSOR); yyaccept = 4; yych = *(YYMARKER = ++YYCURSOR); if (yych == '*') goto yy242; yy238: YYDEBUG(238, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1938 "Zend/zend_language_scanner.l" { int doc_com; if (yyleng > 2) { doc_com = 1; RESET_DOC_COMMENT(); } else { doc_com = 0; } while (YYCURSOR < YYLIMIT) { if (*YYCURSOR++ == '*' && *YYCURSOR == '/') { break; } } if (YYCURSOR < YYLIMIT) { YYCURSOR++; } else { zend_error(E_COMPILE_WARNING, "Unterminated comment starting line %d", CG(zend_lineno)); } yyleng = YYCURSOR - SCNG(yy_text); HANDLE_NEWLINES(yytext, yyleng); if (doc_com) { CG(doc_comment) = estrndup(yytext, yyleng); CG(doc_comment_len) = yyleng; return T_DOC_COMMENT; } return T_COMMENT; } #line 3611 "Zend/zend_language_scanner.c" yy239: YYDEBUG(239, *YYCURSOR); yych = *++YYCURSOR; goto yy176; yy240: YYDEBUG(240, *YYCURSOR); ++YYCURSOR; YYDEBUG(241, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1380 "Zend/zend_language_scanner.l" { return 
T_DIV_EQUAL; } #line 3625 "Zend/zend_language_scanner.c" yy242: YYDEBUG(242, *YYCURSOR); yych = *++YYCURSOR; if (yybm[0+yych] & 64) { goto yy243; } goto yy193; yy243: YYDEBUG(243, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(244, *YYCURSOR); if (yybm[0+yych] & 64) { goto yy243; } goto yy238; yy245: YYDEBUG(245, *YYCURSOR); ++YYCURSOR; YYDEBUG(246, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1376 "Zend/zend_language_scanner.l" { return T_MUL_EQUAL; } #line 3652 "Zend/zend_language_scanner.c" yy247: YYDEBUG(247, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) == '=') goto yy251; YYDEBUG(248, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1436 "Zend/zend_language_scanner.l" { return T_SR; } #line 3663 "Zend/zend_language_scanner.c" yy249: YYDEBUG(249, *YYCURSOR); ++YYCURSOR; YYDEBUG(250, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1364 "Zend/zend_language_scanner.l" { return T_IS_GREATER_OR_EQUAL; } #line 3673 "Zend/zend_language_scanner.c" yy251: YYDEBUG(251, *YYCURSOR); ++YYCURSOR; YYDEBUG(252, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1396 "Zend/zend_language_scanner.l" { return T_SR_EQUAL; } #line 3683 "Zend/zend_language_scanner.c" yy253: YYDEBUG(253, *YYCURSOR); yyaccept = 5; yych = *(YYMARKER = ++YYCURSOR); if (yych <= ';') goto yy254; if (yych <= '<') goto yy269; if (yych <= '=') goto yy267; yy254: YYDEBUG(254, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1432 "Zend/zend_language_scanner.l" { return T_SL; } #line 3698 "Zend/zend_language_scanner.c" yy255: YYDEBUG(255, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy260; if (yych == 's') goto yy260; goto yy193; yy256: YYDEBUG(256, *YYCURSOR); ++YYCURSOR; YYDEBUG(257, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1360 "Zend/zend_language_scanner.l" { return T_IS_SMALLER_OR_EQUAL; } #line 3714 "Zend/zend_language_scanner.c" yy258: YYDEBUG(258, *YYCURSOR); ++YYCURSOR; yy259: YYDEBUG(259, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 
1356 "Zend/zend_language_scanner.l" { return T_IS_NOT_EQUAL; } #line 3725 "Zend/zend_language_scanner.c" yy260: YYDEBUG(260, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy261; if (yych != 'c') goto yy193; yy261: YYDEBUG(261, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy262; if (yych != 'r') goto yy193; yy262: YYDEBUG(262, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy263; if (yych != 'i') goto yy193; yy263: YYDEBUG(263, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy264; if (yych != 'p') goto yy193; yy264: YYDEBUG(264, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy265; if (yych != 't') goto yy193; yy265: YYDEBUG(265, *YYCURSOR); ++YYCURSOR; YYFILL(3); yych = *YYCURSOR; YYDEBUG(266, *YYCURSOR); if (yych <= '\r') { if (yych <= 0x08) goto yy193; if (yych <= '\n') goto yy265; if (yych <= '\f') goto yy193; goto yy265; } else { if (yych <= ' ') { if (yych <= 0x1F) goto yy193; goto yy265; } else { if (yych == '>') goto yy205; goto yy193; } } yy267: YYDEBUG(267, *YYCURSOR); ++YYCURSOR; YYDEBUG(268, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1392 "Zend/zend_language_scanner.l" { return T_SL_EQUAL; } #line 3780 "Zend/zend_language_scanner.c" yy269: YYDEBUG(269, *YYCURSOR); ++YYCURSOR; YYFILL(2); yych = *YYCURSOR; YYDEBUG(270, *YYCURSOR); if (yybm[0+yych] & 128) { goto yy269; } if (yych <= 'Z') { if (yych <= '&') { if (yych == '"') goto yy274; goto yy193; } else { if (yych <= '\'') goto yy273; if (yych <= '@') goto yy193; } } else { if (yych <= '`') { if (yych != '_') goto yy193; } else { if (yych <= 'z') goto yy271; if (yych <= '~') goto yy193; } } yy271: YYDEBUG(271, *YYCURSOR); ++YYCURSOR; YYFILL(2); yych = *YYCURSOR; YYDEBUG(272, *YYCURSOR); if (yych <= '@') { if (yych <= '\f') { if (yych == '\n') goto yy278; goto yy193; } else { if (yych <= '\r') goto yy280; if (yych <= '/') goto yy193; if (yych <= '9') goto yy271; goto yy193; } } else { if (yych <= '_') { if (yych <= 'Z') goto yy271; if (yych <= '^') goto 
yy193; goto yy271; } else { if (yych <= '`') goto yy193; if (yych <= 'z') goto yy271; if (yych <= '~') goto yy193; goto yy271; } } yy273: YYDEBUG(273, *YYCURSOR); yych = *++YYCURSOR; if (yych == '\'') goto yy193; if (yych <= '/') goto yy282; if (yych <= '9') goto yy193; goto yy282; yy274: YYDEBUG(274, *YYCURSOR); yych = *++YYCURSOR; if (yych == '"') goto yy193; if (yych <= '/') goto yy276; if (yych <= '9') goto yy193; goto yy276; yy275: YYDEBUG(275, *YYCURSOR); ++YYCURSOR; YYFILL(3); yych = *YYCURSOR; yy276: YYDEBUG(276, *YYCURSOR); if (yych <= 'Z') { if (yych <= '/') { if (yych != '"') goto yy193; } else { if (yych <= '9') goto yy275; if (yych <= '@') goto yy193; goto yy275; } } else { if (yych <= '`') { if (yych == '_') goto yy275; goto yy193; } else { if (yych <= 'z') goto yy275; if (yych <= '~') goto yy193; goto yy275; } } yy277: YYDEBUG(277, *YYCURSOR); yych = *++YYCURSOR; if (yych == '\n') goto yy278; if (yych == '\r') goto yy280; goto yy193; yy278: YYDEBUG(278, *YYCURSOR); ++YYCURSOR; yy279: YYDEBUG(279, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2106 "Zend/zend_language_scanner.l" { char *s; int bprefix = (yytext[0] != '<') ? 
1 : 0; /* save old heredoc label */ Z_STRVAL_P(zendlval) = CG(heredoc); Z_STRLEN_P(zendlval) = CG(heredoc_len); CG(zend_lineno)++; CG(heredoc_len) = yyleng-bprefix-3-1-(yytext[yyleng-2]=='\r'?1:0); s = yytext+bprefix+3; while ((*s == ' ') || (*s == '\t')) { s++; CG(heredoc_len)--; } if (*s == '\'') { s++; CG(heredoc_len) -= 2; BEGIN(ST_NOWDOC); } else { if (*s == '"') { s++; CG(heredoc_len) -= 2; } BEGIN(ST_HEREDOC); } CG(heredoc) = estrndup(s, CG(heredoc_len)); /* Check for ending label on the next line */ if (CG(heredoc_len) < YYLIMIT - YYCURSOR && !memcmp(YYCURSOR, s, CG(heredoc_len))) { YYCTYPE *end = YYCURSOR + CG(heredoc_len); if (*end == ';') { end++; } if (*end == '\n' || *end == '\r') { BEGIN(ST_END_HEREDOC); } } return T_START_HEREDOC; } #line 3933 "Zend/zend_language_scanner.c" yy280: YYDEBUG(280, *YYCURSOR); yych = *++YYCURSOR; if (yych == '\n') goto yy278; goto yy279; yy281: YYDEBUG(281, *YYCURSOR); ++YYCURSOR; YYFILL(3); yych = *YYCURSOR; yy282: YYDEBUG(282, *YYCURSOR); if (yych <= 'Z') { if (yych <= '/') { if (yych == '\'') goto yy277; goto yy193; } else { if (yych <= '9') goto yy281; if (yych <= '@') goto yy193; goto yy281; } } else { if (yych <= '`') { if (yych == '_') goto yy281; goto yy193; } else { if (yych <= 'z') goto yy281; if (yych <= '~') goto yy193; goto yy281; } } yy283: YYDEBUG(283, *YYCURSOR); yych = *++YYCURSOR; if (yych != '=') goto yy259; YYDEBUG(284, *YYCURSOR); ++YYCURSOR; YYDEBUG(285, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1348 "Zend/zend_language_scanner.l" { return T_IS_NOT_IDENTICAL; } #line 3977 "Zend/zend_language_scanner.c" yy286: YYDEBUG(286, *YYCURSOR); ++YYCURSOR; YYDEBUG(287, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1368 "Zend/zend_language_scanner.l" { return T_PLUS_EQUAL; } #line 3987 "Zend/zend_language_scanner.c" yy288: YYDEBUG(288, *YYCURSOR); ++YYCURSOR; YYDEBUG(289, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1336 "Zend/zend_language_scanner.l" { return T_INC; } #line 3997 
"Zend/zend_language_scanner.c" yy290: YYDEBUG(290, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy291; if (yych != 's') goto yy186; yy291: YYDEBUG(291, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy292; if (yych != 't') goto yy186; yy292: YYDEBUG(292, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(293, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1324 "Zend/zend_language_scanner.l" { return T_LIST; } #line 4020 "Zend/zend_language_scanner.c" yy294: YYDEBUG(294, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) == '=') goto yy298; YYDEBUG(295, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1352 "Zend/zend_language_scanner.l" { return T_IS_EQUAL; } #line 4031 "Zend/zend_language_scanner.c" yy296: YYDEBUG(296, *YYCURSOR); ++YYCURSOR; YYDEBUG(297, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1320 "Zend/zend_language_scanner.l" { return T_DOUBLE_ARROW; } #line 4041 "Zend/zend_language_scanner.c" yy298: YYDEBUG(298, *YYCURSOR); ++YYCURSOR; YYDEBUG(299, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1344 "Zend/zend_language_scanner.l" { return T_IS_IDENTICAL; } #line 4051 "Zend/zend_language_scanner.c" yy300: YYDEBUG(300, *YYCURSOR); yych = *++YYCURSOR; YYDEBUG(-1, yych); switch (yych) { case 'C': case 'c': goto yy302; case 'D': case 'd': goto yy307; case 'F': case 'f': goto yy304; case 'H': case 'h': goto yy301; case 'L': case 'l': goto yy306; case 'M': case 'm': goto yy305; case 'N': case 'n': goto yy308; case 'T': case 't': goto yy303; default: goto yy186; } yy301: YYDEBUG(301, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy369; if (yych == 'a') goto yy369; goto yy186; yy302: YYDEBUG(302, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy362; if (yych == 'l') goto yy362; goto yy186; yy303: YYDEBUG(303, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy355; if (yych == 'r') goto yy355; goto yy186; yy304: YYDEBUG(304, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'U') { if 
(yych == 'I') goto yy339; if (yych <= 'T') goto yy186; goto yy340; } else { if (yych <= 'i') { if (yych <= 'h') goto yy186; goto yy339; } else { if (yych == 'u') goto yy340; goto yy186; } } yy305: YYDEBUG(305, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy331; if (yych == 'e') goto yy331; goto yy186; yy306: YYDEBUG(306, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy325; if (yych == 'i') goto yy325; goto yy186; yy307: YYDEBUG(307, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy320; if (yych == 'i') goto yy320; goto yy186; yy308: YYDEBUG(308, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy309; if (yych != 'a') goto yy186; yy309: YYDEBUG(309, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'M') goto yy310; if (yych != 'm') goto yy186; yy310: YYDEBUG(310, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy311; if (yych != 'e') goto yy186; yy311: YYDEBUG(311, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy312; if (yych != 's') goto yy186; yy312: YYDEBUG(312, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy313; if (yych != 'p') goto yy186; yy313: YYDEBUG(313, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy314; if (yych != 'a') goto yy186; yy314: YYDEBUG(314, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy315; if (yych != 'c') goto yy186; yy315: YYDEBUG(315, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy316; if (yych != 'e') goto yy186; yy316: YYDEBUG(316, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(317, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(318, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(319, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1707 "Zend/zend_language_scanner.l" { if (CG(current_namespace)) { *zendlval = *CG(current_namespace); zval_copy_ctor(zendlval); } else { ZVAL_EMPTY_STRING(zendlval); } return T_NS_C; } #line 4191 "Zend/zend_language_scanner.c" yy320: YYDEBUG(320, 
*YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy321; if (yych != 'r') goto yy186; yy321: YYDEBUG(321, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(322, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(323, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(324, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1680 "Zend/zend_language_scanner.l" { char *filename = zend_get_compiled_filename(TSRMLS_C); const size_t filename_len = strlen(filename); char *dirname; if (!filename) { filename = ""; } dirname = estrndup(filename, filename_len); zend_dirname(dirname, filename_len); if (strcmp(dirname, ".") == 0) { dirname = erealloc(dirname, MAXPATHLEN); #if HAVE_GETCWD VCWD_GETCWD(dirname, MAXPATHLEN); #elif HAVE_GETWD VCWD_GETWD(dirname); #endif } zendlval->value.str.len = strlen(dirname); zendlval->value.str.val = dirname; zendlval->type = IS_STRING; return T_DIR; } #line 4238 "Zend/zend_language_scanner.c" yy325: YYDEBUG(325, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy326; if (yych != 'n') goto yy186; yy326: YYDEBUG(326, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy327; if (yych != 'e') goto yy186; yy327: YYDEBUG(327, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(328, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(329, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(330, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1662 "Zend/zend_language_scanner.l" { zendlval->value.lval = CG(zend_lineno); zendlval->type = IS_LONG; return T_LINE; } #line 4269 "Zend/zend_language_scanner.c" yy331: YYDEBUG(331, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy332; if (yych != 't') goto yy186; yy332: YYDEBUG(332, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy333; if (yych != 'h') goto yy186; yy333: YYDEBUG(333, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy334; 
if (yych != 'o') goto yy186; yy334: YYDEBUG(334, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'D') goto yy335; if (yych != 'd') goto yy186; yy335: YYDEBUG(335, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(336, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(337, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(338, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1641 "Zend/zend_language_scanner.l" { const char *class_name = CG(active_class_entry) ? CG(active_class_entry)->name : NULL; const char *func_name = CG(active_op_array)? CG(active_op_array)->function_name : NULL; size_t len = 0; if (class_name) { len += strlen(class_name) + 2; } if (func_name) { len += strlen(func_name); } zendlval->value.str.len = zend_spprintf(&zendlval->value.str.val, 0, "%s%s%s", class_name ? class_name : "", class_name && func_name ? "::" : "", func_name ? func_name : "" ); zendlval->type = IS_STRING; return T_METHOD_C; } #line 4325 "Zend/zend_language_scanner.c" yy339: YYDEBUG(339, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy350; if (yych == 'l') goto yy350; goto yy186; yy340: YYDEBUG(340, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy341; if (yych != 'n') goto yy186; yy341: YYDEBUG(341, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy342; if (yych != 'c') goto yy186; yy342: YYDEBUG(342, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy343; if (yych != 't') goto yy186; yy343: YYDEBUG(343, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy344; if (yych != 'i') goto yy186; yy344: YYDEBUG(344, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy345; if (yych != 'o') goto yy186; yy345: YYDEBUG(345, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy346; if (yych != 'n') goto yy186; yy346: YYDEBUG(346, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(347, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(348, 
*YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(349, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1625 "Zend/zend_language_scanner.l" { const char *func_name = NULL; if (CG(active_op_array)) { func_name = CG(active_op_array)->function_name; } if (!func_name) { func_name = ""; } zendlval->value.str.len = strlen(func_name); zendlval->value.str.val = estrndup(func_name, zendlval->value.str.len); zendlval->type = IS_STRING; return T_FUNC_C; } #line 4392 "Zend/zend_language_scanner.c" yy350: YYDEBUG(350, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy351; if (yych != 'e') goto yy186; yy351: YYDEBUG(351, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(352, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(353, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(354, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1668 "Zend/zend_language_scanner.l" { char *filename = zend_get_compiled_filename(TSRMLS_C); if (!filename) { filename = ""; } zendlval->value.str.len = strlen(filename); zendlval->value.str.val = estrndup(filename, zendlval->value.str.len); zendlval->type = IS_STRING; return T_FILE; } #line 4424 "Zend/zend_language_scanner.c" yy355: YYDEBUG(355, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy356; if (yych != 'a') goto yy186; yy356: YYDEBUG(356, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy357; if (yych != 'i') goto yy186; yy357: YYDEBUG(357, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy358; if (yych != 't') goto yy186; yy358: YYDEBUG(358, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(359, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(360, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(361, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1605 "Zend/zend_language_scanner.l" { const char *trait_name = NULL; if 
(CG(active_class_entry) && (ZEND_ACC_TRAIT == (CG(active_class_entry)->ce_flags & ZEND_ACC_TRAIT))) { trait_name = CG(active_class_entry)->name; } if (!trait_name) { trait_name = ""; } zendlval->value.str.len = strlen(trait_name); zendlval->value.str.val = estrndup(trait_name, zendlval->value.str.len); zendlval->type = IS_STRING; return T_TRAIT_C; } #line 4474 "Zend/zend_language_scanner.c" yy362: YYDEBUG(362, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy363; if (yych != 'a') goto yy186; yy363: YYDEBUG(363, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy364; if (yych != 's') goto yy186; yy364: YYDEBUG(364, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy365; if (yych != 's') goto yy186; yy365: YYDEBUG(365, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(366, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(367, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(368, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1578 "Zend/zend_language_scanner.l" { const char *class_name = NULL; if (CG(active_class_entry) && (ZEND_ACC_TRAIT == (CG(active_class_entry)->ce_flags & ZEND_ACC_TRAIT))) { /* We create a special __CLASS__ constant that is going to be resolved at run-time */ zendlval->value.str.len = sizeof("__CLASS__")-1; zendlval->value.str.val = estrndup("__CLASS__", zendlval->value.str.len); zendlval->type = IS_CONSTANT; } else { if (CG(active_class_entry)) { class_name = CG(active_class_entry)->name; } if (!class_name) { class_name = ""; } zendlval->value.str.len = strlen(class_name); zendlval->value.str.val = estrndup(class_name, zendlval->value.str.len); zendlval->type = IS_STRING; } return T_CLASS_C; } #line 4531 "Zend/zend_language_scanner.c" yy369: YYDEBUG(369, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy370; if (yych != 'l') goto yy186; yy370: YYDEBUG(370, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy371; if (yych != 't') goto yy186; 
yy371: YYDEBUG(371, *YYCURSOR); yych = *++YYCURSOR; if (yych != '_') goto yy186; YYDEBUG(372, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy373; if (yych != 'c') goto yy186; yy373: YYDEBUG(373, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy374; if (yych != 'o') goto yy186; yy374: YYDEBUG(374, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'M') goto yy375; if (yych != 'm') goto yy186; yy375: YYDEBUG(375, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy376; if (yych != 'p') goto yy186; yy376: YYDEBUG(376, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy377; if (yych != 'i') goto yy186; yy377: YYDEBUG(377, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy378; if (yych != 'l') goto yy186; yy378: YYDEBUG(378, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy379; if (yych != 'e') goto yy186; yy379: YYDEBUG(379, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy380; if (yych != 'r') goto yy186; yy380: YYDEBUG(380, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(381, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1288 "Zend/zend_language_scanner.l" { return T_HALT_COMPILER; } #line 4597 "Zend/zend_language_scanner.c" yy382: YYDEBUG(382, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy386; if (yych == 's') goto yy386; goto yy186; yy383: YYDEBUG(383, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy384; if (yych != 'e') goto yy186; yy384: YYDEBUG(384, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(385, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1268 "Zend/zend_language_scanner.l" { return T_USE; } #line 4621 "Zend/zend_language_scanner.c" yy386: YYDEBUG(386, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy387; if (yych != 'e') goto yy186; yy387: YYDEBUG(387, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy388; if (yych != 't') goto yy186; yy388: YYDEBUG(388, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = 
*YYCURSOR)] & 4) { goto yy185; } YYDEBUG(389, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1316 "Zend/zend_language_scanner.l" { return T_UNSET; } #line 4644 "Zend/zend_language_scanner.c" yy390: YYDEBUG(390, *YYCURSOR); ++YYCURSOR; YYFILL(7); yych = *YYCURSOR; yy391: YYDEBUG(391, *YYCURSOR); if (yych <= 'S') { if (yych <= 'D') { if (yych <= ' ') { if (yych == '\t') goto yy390; if (yych <= 0x1F) goto yy193; goto yy390; } else { if (yych <= 'A') { if (yych <= '@') goto yy193; goto yy395; } else { if (yych <= 'B') goto yy393; if (yych <= 'C') goto yy193; goto yy398; } } } else { if (yych <= 'I') { if (yych == 'F') goto yy399; if (yych <= 'H') goto yy193; goto yy400; } else { if (yych <= 'O') { if (yych <= 'N') goto yy193; goto yy394; } else { if (yych <= 'Q') goto yy193; if (yych <= 'R') goto yy397; goto yy396; } } } } else { if (yych <= 'f') { if (yych <= 'a') { if (yych == 'U') goto yy392; if (yych <= '`') goto yy193; goto yy395; } else { if (yych <= 'c') { if (yych <= 'b') goto yy393; goto yy193; } else { if (yych <= 'd') goto yy398; if (yych <= 'e') goto yy193; goto yy399; } } } else { if (yych <= 'q') { if (yych <= 'i') { if (yych <= 'h') goto yy193; goto yy400; } else { if (yych == 'o') goto yy394; goto yy193; } } else { if (yych <= 's') { if (yych <= 'r') goto yy397; goto yy396; } else { if (yych != 'u') goto yy193; } } } } yy392: YYDEBUG(392, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy459; if (yych == 'n') goto yy459; goto yy193; yy393: YYDEBUG(393, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'O') { if (yych == 'I') goto yy446; if (yych <= 'N') goto yy193; goto yy447; } else { if (yych <= 'i') { if (yych <= 'h') goto yy193; goto yy446; } else { if (yych == 'o') goto yy447; goto yy193; } } yy394: YYDEBUG(394, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'B') goto yy438; if (yych == 'b') goto yy438; goto yy193; yy395: YYDEBUG(395, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy431; if (yych == 'r') goto yy431; goto yy193; yy396: 
YYDEBUG(396, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy423; if (yych == 't') goto yy423; goto yy193; yy397: YYDEBUG(397, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy421; if (yych == 'e') goto yy421; goto yy193; yy398: YYDEBUG(398, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy417; if (yych == 'o') goto yy417; goto yy193; yy399: YYDEBUG(399, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy410; if (yych == 'l') goto yy410; goto yy193; yy400: YYDEBUG(400, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy401; if (yych != 'n') goto yy193; yy401: YYDEBUG(401, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy402; if (yych != 't') goto yy193; yy402: YYDEBUG(402, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy403; if (yych != 'e') goto yy405; yy403: YYDEBUG(403, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'G') goto yy408; if (yych == 'g') goto yy408; goto yy193; yy404: YYDEBUG(404, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; yy405: YYDEBUG(405, *YYCURSOR); if (yych <= 0x1F) { if (yych == '\t') goto yy404; goto yy193; } else { if (yych <= ' ') goto yy404; if (yych != ')') goto yy193; } YYDEBUG(406, *YYCURSOR); ++YYCURSOR; YYDEBUG(407, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1216 "Zend/zend_language_scanner.l" { return T_INT_CAST; } #line 4820 "Zend/zend_language_scanner.c" yy408: YYDEBUG(408, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy409; if (yych != 'e') goto yy193; yy409: YYDEBUG(409, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy404; if (yych == 'r') goto yy404; goto yy193; yy410: YYDEBUG(410, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy411; if (yych != 'o') goto yy193; yy411: YYDEBUG(411, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy412; if (yych != 'a') goto yy193; yy412: YYDEBUG(412, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy413; if (yych != 't') goto yy193; yy413: YYDEBUG(413, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = 
*YYCURSOR; YYDEBUG(414, *YYCURSOR); if (yych <= 0x1F) { if (yych == '\t') goto yy413; goto yy193; } else { if (yych <= ' ') goto yy413; if (yych != ')') goto yy193; } YYDEBUG(415, *YYCURSOR); ++YYCURSOR; YYDEBUG(416, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1220 "Zend/zend_language_scanner.l" { return T_DOUBLE_CAST; } #line 4868 "Zend/zend_language_scanner.c" yy417: YYDEBUG(417, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'U') goto yy418; if (yych != 'u') goto yy193; yy418: YYDEBUG(418, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'B') goto yy419; if (yych != 'b') goto yy193; yy419: YYDEBUG(419, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy420; if (yych != 'l') goto yy193; yy420: YYDEBUG(420, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy413; if (yych == 'e') goto yy413; goto yy193; yy421: YYDEBUG(421, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy422; if (yych != 'a') goto yy193; yy422: YYDEBUG(422, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy413; if (yych == 'l') goto yy413; goto yy193; yy423: YYDEBUG(423, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy424; if (yych != 'r') goto yy193; yy424: YYDEBUG(424, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy425; if (yych != 'i') goto yy193; yy425: YYDEBUG(425, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy426; if (yych != 'n') goto yy193; yy426: YYDEBUG(426, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'G') goto yy427; if (yych != 'g') goto yy193; yy427: YYDEBUG(427, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(428, *YYCURSOR); if (yych <= 0x1F) { if (yych == '\t') goto yy427; goto yy193; } else { if (yych <= ' ') goto yy427; if (yych != ')') goto yy193; } YYDEBUG(429, *YYCURSOR); ++YYCURSOR; YYDEBUG(430, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1224 "Zend/zend_language_scanner.l" { return T_STRING_CAST; } #line 4942 "Zend/zend_language_scanner.c" yy431: YYDEBUG(431, *YYCURSOR); yych = *++YYCURSOR; if (yych == 
'R') goto yy432; if (yych != 'r') goto yy193; yy432: YYDEBUG(432, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy433; if (yych != 'a') goto yy193; yy433: YYDEBUG(433, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'Y') goto yy434; if (yych != 'y') goto yy193; yy434: YYDEBUG(434, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(435, *YYCURSOR); if (yych <= 0x1F) { if (yych == '\t') goto yy434; goto yy193; } else { if (yych <= ' ') goto yy434; if (yych != ')') goto yy193; } YYDEBUG(436, *YYCURSOR); ++YYCURSOR; YYDEBUG(437, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1228 "Zend/zend_language_scanner.l" { return T_ARRAY_CAST; } #line 4979 "Zend/zend_language_scanner.c" yy438: YYDEBUG(438, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'J') goto yy439; if (yych != 'j') goto yy193; yy439: YYDEBUG(439, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy440; if (yych != 'e') goto yy193; yy440: YYDEBUG(440, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy441; if (yych != 'c') goto yy193; yy441: YYDEBUG(441, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy442; if (yych != 't') goto yy193; yy442: YYDEBUG(442, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(443, *YYCURSOR); if (yych <= 0x1F) { if (yych == '\t') goto yy442; goto yy193; } else { if (yych <= ' ') goto yy442; if (yych != ')') goto yy193; } YYDEBUG(444, *YYCURSOR); ++YYCURSOR; YYDEBUG(445, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1232 "Zend/zend_language_scanner.l" { return T_OBJECT_CAST; } #line 5021 "Zend/zend_language_scanner.c" yy446: YYDEBUG(446, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy456; if (yych == 'n') goto yy456; goto yy193; yy447: YYDEBUG(447, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy448; if (yych != 'o') goto yy193; yy448: YYDEBUG(448, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy449; if (yych != 'l') goto yy193; yy449: YYDEBUG(449, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy454; 
if (yych == 'e') goto yy454; goto yy451; yy450: YYDEBUG(450, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; yy451: YYDEBUG(451, *YYCURSOR); if (yych <= 0x1F) { if (yych == '\t') goto yy450; goto yy193; } else { if (yych <= ' ') goto yy450; if (yych != ')') goto yy193; } YYDEBUG(452, *YYCURSOR); ++YYCURSOR; YYDEBUG(453, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1236 "Zend/zend_language_scanner.l" { return T_BOOL_CAST; } #line 5066 "Zend/zend_language_scanner.c" yy454: YYDEBUG(454, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy455; if (yych != 'a') goto yy193; yy455: YYDEBUG(455, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy450; if (yych == 'n') goto yy450; goto yy193; yy456: YYDEBUG(456, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy457; if (yych != 'a') goto yy193; yy457: YYDEBUG(457, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy458; if (yych != 'r') goto yy193; yy458: YYDEBUG(458, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'Y') goto yy427; if (yych == 'y') goto yy427; goto yy193; yy459: YYDEBUG(459, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy460; if (yych != 's') goto yy193; yy460: YYDEBUG(460, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy461; if (yych != 'e') goto yy193; yy461: YYDEBUG(461, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy462; if (yych != 't') goto yy193; yy462: YYDEBUG(462, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(463, *YYCURSOR); if (yych <= 0x1F) { if (yych == '\t') goto yy462; goto yy193; } else { if (yych <= ' ') goto yy462; if (yych != ')') goto yy193; } YYDEBUG(464, *YYCURSOR); ++YYCURSOR; YYDEBUG(465, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1240 "Zend/zend_language_scanner.l" { return T_UNSET_CAST; } #line 5130 "Zend/zend_language_scanner.c" yy466: YYDEBUG(466, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy467; if (yych != 'r') goto yy186; yy467: YYDEBUG(467, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = 
*YYCURSOR)] & 4) { goto yy185; } YYDEBUG(468, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1212 "Zend/zend_language_scanner.l" { return T_VAR; } #line 5148 "Zend/zend_language_scanner.c" yy469: YYDEBUG(469, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'M') goto yy473; if (yych == 'm') goto yy473; goto yy186; yy470: YYDEBUG(470, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'W') goto yy471; if (yych != 'w') goto yy186; yy471: YYDEBUG(471, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(472, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1204 "Zend/zend_language_scanner.l" { return T_NEW; } #line 5172 "Zend/zend_language_scanner.c" yy473: YYDEBUG(473, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy474; if (yych != 'e') goto yy186; yy474: YYDEBUG(474, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy475; if (yych != 's') goto yy186; yy475: YYDEBUG(475, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy476; if (yych != 'p') goto yy186; yy476: YYDEBUG(476, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy477; if (yych != 'a') goto yy186; yy477: YYDEBUG(477, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy478; if (yych != 'c') goto yy186; yy478: YYDEBUG(478, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy479; if (yych != 'e') goto yy186; yy479: YYDEBUG(479, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(480, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1264 "Zend/zend_language_scanner.l" { return T_NAMESPACE; } #line 5215 "Zend/zend_language_scanner.c" yy481: YYDEBUG(481, *YYCURSOR); ++YYCURSOR; YYDEBUG(482, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1196 "Zend/zend_language_scanner.l" { return T_PAAMAYIM_NEKUDOTAYIM; } #line 5225 "Zend/zend_language_scanner.c" yy483: YYDEBUG(483, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; yy484: YYDEBUG(484, *YYCURSOR); if (yych <= '\f') { if (yych <= 0x08) goto yy140; if (yych <= 
'\n') goto yy483; goto yy140; } else { if (yych <= '\r') goto yy483; if (yych == ' ') goto yy483; goto yy140; } yy485: YYDEBUG(485, *YYCURSOR); ++YYCURSOR; YYDEBUG(486, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1372 "Zend/zend_language_scanner.l" { return T_MINUS_EQUAL; } #line 5251 "Zend/zend_language_scanner.c" yy487: YYDEBUG(487, *YYCURSOR); ++YYCURSOR; YYDEBUG(488, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1340 "Zend/zend_language_scanner.l" { return T_DEC; } #line 5261 "Zend/zend_language_scanner.c" yy489: YYDEBUG(489, *YYCURSOR); ++YYCURSOR; YYDEBUG(490, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1166 "Zend/zend_language_scanner.l" { yy_push_state(ST_LOOKING_FOR_PROPERTY TSRMLS_CC); return T_OBJECT_OPERATOR; } #line 5272 "Zend/zend_language_scanner.c" yy491: YYDEBUG(491, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'O') { if (yych == 'I') goto yy498; if (yych <= 'N') goto yy186; goto yy499; } else { if (yych <= 'i') { if (yych <= 'h') goto yy186; goto yy498; } else { if (yych == 'o') goto yy499; goto yy186; } } yy492: YYDEBUG(492, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'B') goto yy493; if (yych != 'b') goto yy186; yy493: YYDEBUG(493, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy494; if (yych != 'l') goto yy186; yy494: YYDEBUG(494, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy495; if (yych != 'i') goto yy186; yy495: YYDEBUG(495, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy496; if (yych != 'c') goto yy186; yy496: YYDEBUG(496, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(497, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1312 "Zend/zend_language_scanner.l" { return T_PUBLIC; } #line 5321 "Zend/zend_language_scanner.c" yy498: YYDEBUG(498, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'V') { if (yych == 'N') goto yy507; if (yych <= 'U') goto yy186; goto yy508; } else { if (yych <= 'n') { if (yych <= 'm') goto yy186; goto yy507; } else { if (yych == 
'v') goto yy508; goto yy186; } } yy499: YYDEBUG(499, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy500; if (yych != 't') goto yy186; yy500: YYDEBUG(500, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy501; if (yych != 'e') goto yy186; yy501: YYDEBUG(501, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy502; if (yych != 'c') goto yy186; yy502: YYDEBUG(502, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy503; if (yych != 't') goto yy186; yy503: YYDEBUG(503, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy504; if (yych != 'e') goto yy186; yy504: YYDEBUG(504, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'D') goto yy505; if (yych != 'd') goto yy186; yy505: YYDEBUG(505, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(506, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1308 "Zend/zend_language_scanner.l" { return T_PROTECTED; } #line 5380 "Zend/zend_language_scanner.c" yy507: YYDEBUG(507, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy513; if (yych == 't') goto yy513; goto yy186; yy508: YYDEBUG(508, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy509; if (yych != 'a') goto yy186; yy509: YYDEBUG(509, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy510; if (yych != 't') goto yy186; yy510: YYDEBUG(510, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy511; if (yych != 'e') goto yy186; yy511: YYDEBUG(511, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(512, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1304 "Zend/zend_language_scanner.l" { return T_PRIVATE; } #line 5414 "Zend/zend_language_scanner.c" yy513: YYDEBUG(513, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(514, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1142 "Zend/zend_language_scanner.l" { return T_PRINT; } #line 5427 "Zend/zend_language_scanner.c" yy515: YYDEBUG(515, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') 
goto yy520; if (yych == 'o') goto yy520; goto yy186; yy516: YYDEBUG(516, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy517; if (yych != 't') goto yy186; yy517: YYDEBUG(517, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy518; if (yych != 'o') goto yy186; yy518: YYDEBUG(518, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(519, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1134 "Zend/zend_language_scanner.l" { return T_GOTO; } #line 5456 "Zend/zend_language_scanner.c" yy520: YYDEBUG(520, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'B') goto yy521; if (yych != 'b') goto yy186; yy521: YYDEBUG(521, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy522; if (yych != 'a') goto yy186; yy522: YYDEBUG(522, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy523; if (yych != 'l') goto yy186; yy523: YYDEBUG(523, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(524, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1276 "Zend/zend_language_scanner.l" { return T_GLOBAL; } #line 5484 "Zend/zend_language_scanner.c" yy525: YYDEBUG(525, *YYCURSOR); yych = *++YYCURSOR; if (yych == '<') goto yy533; goto yy193; yy526: YYDEBUG(526, *YYCURSOR); yych = *++YYCURSOR; goto yy180; yy527: YYDEBUG(527, *YYCURSOR); yych = *++YYCURSOR; goto yy178; yy528: YYDEBUG(528, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy529; if (yych != 'e') goto yy186; yy529: YYDEBUG(529, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy530; if (yych != 'a') goto yy186; yy530: YYDEBUG(530, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'K') goto yy531; if (yych != 'k') goto yy186; yy531: YYDEBUG(531, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(532, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1126 "Zend/zend_language_scanner.l" { return T_BREAK; } #line 5525 "Zend/zend_language_scanner.c" yy533: YYDEBUG(533, *YYCURSOR); yych = *++YYCURSOR; if (yych == 
'<') goto yy269; goto yy193; yy534: YYDEBUG(534, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy541; if (yych == 'a') goto yy541; goto yy186; yy535: YYDEBUG(535, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy536; if (yych != 'i') goto yy186; yy536: YYDEBUG(536, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy537; if (yych != 't') goto yy186; yy537: YYDEBUG(537, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy538; if (yych != 'c') goto yy186; yy538: YYDEBUG(538, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy539; if (yych != 'h') goto yy186; yy539: YYDEBUG(539, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(540, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1110 "Zend/zend_language_scanner.l" { return T_SWITCH; } #line 5569 "Zend/zend_language_scanner.c" yy541: YYDEBUG(541, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy542; if (yych != 't') goto yy186; yy542: YYDEBUG(542, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy543; if (yych != 'i') goto yy186; yy543: YYDEBUG(543, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy544; if (yych != 'c') goto yy186; yy544: YYDEBUG(544, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(545, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1292 "Zend/zend_language_scanner.l" { return T_STATIC; } #line 5597 "Zend/zend_language_scanner.c" yy546: YYDEBUG(546, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy557; if (yych == 's') goto yy557; goto yy186; yy547: YYDEBUG(547, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'D') goto yy555; if (yych == 'd') goto yy555; goto yy186; yy548: YYDEBUG(548, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy551; if (yych == 'r') goto yy551; goto yy186; yy549: YYDEBUG(549, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(550, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1106 
"Zend/zend_language_scanner.l" { return T_AS; } #line 5628 "Zend/zend_language_scanner.c" yy551: YYDEBUG(551, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy552; if (yych != 'a') goto yy186; yy552: YYDEBUG(552, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'Y') goto yy553; if (yych != 'y') goto yy186; yy553: YYDEBUG(553, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(554, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1328 "Zend/zend_language_scanner.l" { return T_ARRAY; } #line 5651 "Zend/zend_language_scanner.c" yy555: YYDEBUG(555, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(556, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1424 "Zend/zend_language_scanner.l" { return T_LOGICAL_AND; } #line 5664 "Zend/zend_language_scanner.c" yy557: YYDEBUG(557, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy558; if (yych != 't') goto yy186; yy558: YYDEBUG(558, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy559; if (yych != 'r') goto yy186; yy559: YYDEBUG(559, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy560; if (yych != 'a') goto yy186; yy560: YYDEBUG(560, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy561; if (yych != 'c') goto yy186; yy561: YYDEBUG(561, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy562; if (yych != 't') goto yy186; yy562: YYDEBUG(562, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(563, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1296 "Zend/zend_language_scanner.l" { return T_ABSTRACT; } #line 5702 "Zend/zend_language_scanner.c" yy564: YYDEBUG(564, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy565; if (yych != 'i') goto yy186; yy565: YYDEBUG(565, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy566; if (yych != 'l') goto yy186; yy566: YYDEBUG(566, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy567; if (yych != 'e') goto yy186; yy567: YYDEBUG(567, 
*YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(568, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1066 "Zend/zend_language_scanner.l" { return T_WHILE; } #line 5730 "Zend/zend_language_scanner.c" yy569: YYDEBUG(569, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(570, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1050 "Zend/zend_language_scanner.l" { return T_IF; } #line 5743 "Zend/zend_language_scanner.c" yy571: YYDEBUG(571, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy613; if (yych == 'p') goto yy613; goto yy186; yy572: YYDEBUG(572, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'T') { if (yych <= 'C') { if (yych <= 'B') goto yy186; goto yy580; } else { if (yych <= 'R') goto yy186; if (yych <= 'S') goto yy578; goto yy579; } } else { if (yych <= 'r') { if (yych == 'c') goto yy580; goto yy186; } else { if (yych <= 's') goto yy578; if (yych <= 't') goto yy579; goto yy186; } } yy573: YYDEBUG(573, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy574; if (yych != 's') goto yy186; yy574: YYDEBUG(574, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy575; if (yych != 'e') goto yy186; yy575: YYDEBUG(575, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy576; if (yych != 't') goto yy186; yy576: YYDEBUG(576, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(577, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1280 "Zend/zend_language_scanner.l" { return T_ISSET; } #line 5799 "Zend/zend_language_scanner.c" yy578: YYDEBUG(578, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy599; if (yych == 't') goto yy599; goto yy186; yy579: YYDEBUG(579, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy592; if (yych == 'e') goto yy592; goto yy186; yy580: YYDEBUG(580, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy581; if (yych != 'l') goto yy186; yy581: YYDEBUG(581, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'U') 
goto yy582; if (yych != 'u') goto yy186; yy582: YYDEBUG(582, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'D') goto yy583; if (yych != 'd') goto yy186; yy583: YYDEBUG(583, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy584; if (yych != 'e') goto yy186; yy584: YYDEBUG(584, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '^') { if (yych <= '9') { if (yych >= '0') goto yy185; } else { if (yych <= '@') goto yy585; if (yych <= 'Z') goto yy185; } } else { if (yych <= '`') { if (yych <= '_') goto yy586; } else { if (yych <= 'z') goto yy185; if (yych >= 0x7F) goto yy185; } } yy585: YYDEBUG(585, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1248 "Zend/zend_language_scanner.l" { return T_INCLUDE; } #line 5857 "Zend/zend_language_scanner.c" yy586: YYDEBUG(586, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy587; if (yych != 'o') goto yy186; yy587: YYDEBUG(587, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy588; if (yych != 'n') goto yy186; yy588: YYDEBUG(588, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy589; if (yych != 'c') goto yy186; yy589: YYDEBUG(589, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy590; if (yych != 'e') goto yy186; yy590: YYDEBUG(590, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(591, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1252 "Zend/zend_language_scanner.l" { return T_INCLUDE_ONCE; } #line 5890 "Zend/zend_language_scanner.c" yy592: YYDEBUG(592, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy593; if (yych != 'r') goto yy186; yy593: YYDEBUG(593, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'F') goto yy594; if (yych != 'f') goto yy186; yy594: YYDEBUG(594, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy595; if (yych != 'a') goto yy186; yy595: YYDEBUG(595, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy596; if (yych != 'c') goto yy186; yy596: YYDEBUG(596, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy597; if (yych != 
'e') goto yy186; yy597: YYDEBUG(597, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(598, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1150 "Zend/zend_language_scanner.l" { return T_INTERFACE; } #line 5928 "Zend/zend_language_scanner.c" yy599: YYDEBUG(599, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'E') { if (yych == 'A') goto yy600; if (yych <= 'D') goto yy186; goto yy601; } else { if (yych <= 'a') { if (yych <= '`') goto yy186; } else { if (yych == 'e') goto yy601; goto yy186; } } yy600: YYDEBUG(600, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy607; if (yych == 'n') goto yy607; goto yy186; yy601: YYDEBUG(601, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy602; if (yych != 'a') goto yy186; yy602: YYDEBUG(602, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'D') goto yy603; if (yych != 'd') goto yy186; yy603: YYDEBUG(603, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy604; if (yych != 'o') goto yy186; yy604: YYDEBUG(604, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'F') goto yy605; if (yych != 'f') goto yy186; yy605: YYDEBUG(605, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(606, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1272 "Zend/zend_language_scanner.l" { return T_INSTEADOF; } #line 5982 "Zend/zend_language_scanner.c" yy607: YYDEBUG(607, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy608; if (yych != 'c') goto yy186; yy608: YYDEBUG(608, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy609; if (yych != 'e') goto yy186; yy609: YYDEBUG(609, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy610; if (yych != 'o') goto yy186; yy610: YYDEBUG(610, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'F') goto yy611; if (yych != 'f') goto yy186; yy611: YYDEBUG(611, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(612, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1102 "Zend/zend_language_scanner.l" { 
return T_INSTANCEOF; } #line 6015 "Zend/zend_language_scanner.c" yy613: YYDEBUG(613, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy614; if (yych != 'l') goto yy186; yy614: YYDEBUG(614, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy615; if (yych != 'e') goto yy186; yy615: YYDEBUG(615, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'M') goto yy616; if (yych != 'm') goto yy186; yy616: YYDEBUG(616, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy617; if (yych != 'e') goto yy186; yy617: YYDEBUG(617, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy618; if (yych != 'n') goto yy186; yy618: YYDEBUG(618, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy619; if (yych != 't') goto yy186; yy619: YYDEBUG(619, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy620; if (yych != 's') goto yy186; yy620: YYDEBUG(620, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(621, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1162 "Zend/zend_language_scanner.l" { return T_IMPLEMENTS; } #line 6063 "Zend/zend_language_scanner.c" yy622: YYDEBUG(622, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy630; if (yych == 'r') goto yy630; goto yy186; yy623: YYDEBUG(623, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'Y') { if (yych == 'A') goto yy626; if (yych <= 'X') goto yy186; } else { if (yych <= 'a') { if (yych <= '`') goto yy186; goto yy626; } else { if (yych != 'y') goto yy186; } } YYDEBUG(624, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(625, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1038 "Zend/zend_language_scanner.l" { return T_TRY; } #line 6095 "Zend/zend_language_scanner.c" yy626: YYDEBUG(626, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy627; if (yych != 'i') goto yy186; yy627: YYDEBUG(627, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy628; if (yych != 't') goto yy186; yy628: YYDEBUG(628, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = 
*YYCURSOR)] & 4) { goto yy185; } YYDEBUG(629, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1154 "Zend/zend_language_scanner.l" { return T_TRAIT; } #line 6118 "Zend/zend_language_scanner.c" yy630: YYDEBUG(630, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy631; if (yych != 'o') goto yy186; yy631: YYDEBUG(631, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'W') goto yy632; if (yych != 'w') goto yy186; yy632: YYDEBUG(632, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(633, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1046 "Zend/zend_language_scanner.l" { return T_THROW; } #line 6141 "Zend/zend_language_scanner.c" yy634: YYDEBUG(634, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'T') { if (yych == 'Q') goto yy636; if (yych <= 'S') goto yy186; } else { if (yych <= 'q') { if (yych <= 'p') goto yy186; goto yy636; } else { if (yych != 't') goto yy186; } } YYDEBUG(635, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'U') goto yy648; if (yych == 'u') goto yy648; goto yy186; yy636: YYDEBUG(636, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'U') goto yy637; if (yych != 'u') goto yy186; yy637: YYDEBUG(637, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy638; if (yych != 'i') goto yy186; yy638: YYDEBUG(638, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy639; if (yych != 'r') goto yy186; yy639: YYDEBUG(639, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy640; if (yych != 'e') goto yy186; yy640: YYDEBUG(640, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '^') { if (yych <= '9') { if (yych >= '0') goto yy185; } else { if (yych <= '@') goto yy641; if (yych <= 'Z') goto yy185; } } else { if (yych <= '`') { if (yych <= '_') goto yy642; } else { if (yych <= 'z') goto yy185; if (yych >= 0x7F) goto yy185; } } yy641: YYDEBUG(641, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1256 "Zend/zend_language_scanner.l" { return T_REQUIRE; } #line 6206 "Zend/zend_language_scanner.c" yy642: YYDEBUG(642, *YYCURSOR); 
yych = *++YYCURSOR; if (yych == 'O') goto yy643; if (yych != 'o') goto yy186; yy643: YYDEBUG(643, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy644; if (yych != 'n') goto yy186; yy644: YYDEBUG(644, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy645; if (yych != 'c') goto yy186; yy645: YYDEBUG(645, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy646; if (yych != 'e') goto yy186; yy646: YYDEBUG(646, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(647, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1260 "Zend/zend_language_scanner.l" { return T_REQUIRE_ONCE; } #line 6239 "Zend/zend_language_scanner.c" yy648: YYDEBUG(648, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy649; if (yych != 'r') goto yy186; yy649: YYDEBUG(649, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy650; if (yych != 'n') goto yy186; yy650: YYDEBUG(650, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(651, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1034 "Zend/zend_language_scanner.l" { return T_RETURN; } #line 6262 "Zend/zend_language_scanner.c" yy652: YYDEBUG(652, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'T') { if (yych <= 'L') { if (yych <= 'K') goto yy186; goto yy675; } else { if (yych <= 'R') goto yy186; if (yych <= 'S') goto yy674; goto yy673; } } else { if (yych <= 'r') { if (yych == 'l') goto yy675; goto yy186; } else { if (yych <= 's') goto yy674; if (yych <= 't') goto yy673; goto yy186; } } yy653: YYDEBUG(653, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'O') { if (yych == 'A') goto yy665; if (yych <= 'N') goto yy186; goto yy666; } else { if (yych <= 'a') { if (yych <= '`') goto yy186; goto yy665; } else { if (yych == 'o') goto yy666; goto yy186; } } yy654: YYDEBUG(654, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy655; if (yych != 'n') goto yy186; yy655: YYDEBUG(655, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'T') { if (yych <= 'R') goto yy186; if 
(yych >= 'T') goto yy657; } else { if (yych <= 'r') goto yy186; if (yych <= 's') goto yy656; if (yych <= 't') goto yy657; goto yy186; } yy656: YYDEBUG(656, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy663; if (yych == 't') goto yy663; goto yy186; yy657: YYDEBUG(657, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy658; if (yych != 'i') goto yy186; yy658: YYDEBUG(658, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy659; if (yych != 'n') goto yy186; yy659: YYDEBUG(659, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'U') goto yy660; if (yych != 'u') goto yy186; yy660: YYDEBUG(660, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy661; if (yych != 'e') goto yy186; yy661: YYDEBUG(661, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(662, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1130 "Zend/zend_language_scanner.l" { return T_CONTINUE; } #line 6356 "Zend/zend_language_scanner.c" yy663: YYDEBUG(663, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(664, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1030 "Zend/zend_language_scanner.l" { return T_CONST; } #line 6369 "Zend/zend_language_scanner.c" yy665: YYDEBUG(665, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy670; if (yych == 's') goto yy670; goto yy186; yy666: YYDEBUG(666, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy667; if (yych != 'n') goto yy186; yy667: YYDEBUG(667, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy668; if (yych != 'e') goto yy186; yy668: YYDEBUG(668, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(669, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1208 "Zend/zend_language_scanner.l" { return T_CLONE; } #line 6398 "Zend/zend_language_scanner.c" yy670: YYDEBUG(670, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy671; if (yych != 's') goto yy186; yy671: YYDEBUG(671, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = 
*YYCURSOR)] & 4) { goto yy185; } YYDEBUG(672, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1146 "Zend/zend_language_scanner.l" { return T_CLASS; } #line 6416 "Zend/zend_language_scanner.c" yy673: YYDEBUG(673, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy684; if (yych == 'c') goto yy684; goto yy186; yy674: YYDEBUG(674, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy682; if (yych == 'e') goto yy682; goto yy186; yy675: YYDEBUG(675, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy676; if (yych != 'l') goto yy186; yy676: YYDEBUG(676, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy677; if (yych != 'a') goto yy186; yy677: YYDEBUG(677, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'B') goto yy678; if (yych != 'b') goto yy186; yy678: YYDEBUG(678, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy679; if (yych != 'l') goto yy186; yy679: YYDEBUG(679, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy680; if (yych != 'e') goto yy186; yy680: YYDEBUG(680, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(681, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1332 "Zend/zend_language_scanner.l" { return T_CALLABLE; } #line 6466 "Zend/zend_language_scanner.c" yy682: YYDEBUG(682, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(683, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1118 "Zend/zend_language_scanner.l" { return T_CASE; } #line 6479 "Zend/zend_language_scanner.c" yy684: YYDEBUG(684, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy685; if (yych != 'h') goto yy186; yy685: YYDEBUG(685, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(686, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1042 "Zend/zend_language_scanner.l" { return T_CATCH; } #line 6497 "Zend/zend_language_scanner.c" yy687: YYDEBUG(687, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy704; if (yych == 'n') goto yy704; goto 
yy186; yy688: YYDEBUG(688, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy697; if (yych == 'r') goto yy697; goto yy186; yy689: YYDEBUG(689, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy690; if (yych != 'n') goto yy186; yy690: YYDEBUG(690, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy691; if (yych != 'c') goto yy186; yy691: YYDEBUG(691, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy692; if (yych != 't') goto yy186; yy692: YYDEBUG(692, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy693; if (yych != 'i') goto yy186; yy693: YYDEBUG(693, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy694; if (yych != 'o') goto yy186; yy694: YYDEBUG(694, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy695; if (yych != 'n') goto yy186; yy695: YYDEBUG(695, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(696, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1026 "Zend/zend_language_scanner.l" { return T_FUNCTION; } #line 6552 "Zend/zend_language_scanner.c" yy697: YYDEBUG(697, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '^') { if (yych <= '@') { if (yych <= '/') goto yy698; if (yych <= '9') goto yy185; } else { if (yych == 'E') goto yy699; if (yych <= 'Z') goto yy185; } } else { if (yych <= 'd') { if (yych != '`') goto yy185; } else { if (yych <= 'e') goto yy699; if (yych <= 'z') goto yy185; if (yych >= 0x7F) goto yy185; } } yy698: YYDEBUG(698, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1078 "Zend/zend_language_scanner.l" { return T_FOR; } #line 6580 "Zend/zend_language_scanner.c" yy699: YYDEBUG(699, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy700; if (yych != 'a') goto yy186; yy700: YYDEBUG(700, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy701; if (yych != 'c') goto yy186; yy701: YYDEBUG(701, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy702; if (yych != 'h') goto yy186; yy702: YYDEBUG(702, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = 
*YYCURSOR)] & 4) { goto yy185; } YYDEBUG(703, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1086 "Zend/zend_language_scanner.l" { return T_FOREACH; } #line 6608 "Zend/zend_language_scanner.c" yy704: YYDEBUG(704, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy705; if (yych != 'a') goto yy186; yy705: YYDEBUG(705, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy706; if (yych != 'l') goto yy186; yy706: YYDEBUG(706, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(707, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1300 "Zend/zend_language_scanner.l" { return T_FINAL; } #line 6631 "Zend/zend_language_scanner.c" yy708: YYDEBUG(708, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'F') { if (yych == 'C') goto yy714; if (yych <= 'E') goto yy186; goto yy715; } else { if (yych <= 'c') { if (yych <= 'b') goto yy186; goto yy714; } else { if (yych == 'f') goto yy715; goto yy186; } } yy709: YYDEBUG(709, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy712; if (yych == 'e') goto yy712; goto yy186; yy710: YYDEBUG(710, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(711, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1074 "Zend/zend_language_scanner.l" { return T_DO; } #line 6666 "Zend/zend_language_scanner.c" yy712: YYDEBUG(712, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(713, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1022 "Zend/zend_language_scanner.l" { return T_EXIT; } #line 6679 "Zend/zend_language_scanner.c" yy714: YYDEBUG(714, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy721; if (yych == 'l') goto yy721; goto yy186; yy715: YYDEBUG(715, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy716; if (yych != 'a') goto yy186; yy716: YYDEBUG(716, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'U') goto yy717; if (yych != 'u') goto yy186; yy717: YYDEBUG(717, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto 
yy718; if (yych != 'l') goto yy186; yy718: YYDEBUG(718, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy719; if (yych != 't') goto yy186; yy719: YYDEBUG(719, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(720, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1122 "Zend/zend_language_scanner.l" { return T_DEFAULT; } #line 6718 "Zend/zend_language_scanner.c" yy721: YYDEBUG(721, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy722; if (yych != 'a') goto yy186; yy722: YYDEBUG(722, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy723; if (yych != 'r') goto yy186; yy723: YYDEBUG(723, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy724; if (yych != 'e') goto yy186; yy724: YYDEBUG(724, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(725, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1094 "Zend/zend_language_scanner.l" { return T_DECLARE; } #line 6746 "Zend/zend_language_scanner.c" yy726: YYDEBUG(726, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy788; if (yych == 'h') goto yy788; goto yy186; yy727: YYDEBUG(727, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy782; if (yych == 's') goto yy782; goto yy186; yy728: YYDEBUG(728, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'P') goto yy778; if (yych == 'p') goto yy778; goto yy186; yy729: YYDEBUG(729, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'D') goto yy744; if (yych == 'd') goto yy744; goto yy186; yy730: YYDEBUG(730, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy741; if (yych == 'a') goto yy741; goto yy186; yy731: YYDEBUG(731, *YYCURSOR); yych = *++YYCURSOR; if (yych <= 'T') { if (yych == 'I') goto yy732; if (yych <= 'S') goto yy186; goto yy733; } else { if (yych <= 'i') { if (yych <= 'h') goto yy186; } else { if (yych == 't') goto yy733; goto yy186; } } yy732: YYDEBUG(732, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy739; if (yych == 't') goto yy739; goto yy186; yy733: 
YYDEBUG(733, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy734; if (yych != 'e') goto yy186; yy734: YYDEBUG(734, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'N') goto yy735; if (yych != 'n') goto yy186; yy735: YYDEBUG(735, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'D') goto yy736; if (yych != 'd') goto yy186; yy736: YYDEBUG(736, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'S') goto yy737; if (yych != 's') goto yy186; yy737: YYDEBUG(737, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(738, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1158 "Zend/zend_language_scanner.l" { return T_EXTENDS; } #line 6830 "Zend/zend_language_scanner.c" yy739: YYDEBUG(739, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(740, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1018 "Zend/zend_language_scanner.l" { return T_EXIT; } #line 6843 "Zend/zend_language_scanner.c" yy741: YYDEBUG(741, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy742; if (yych != 'l') goto yy186; yy742: YYDEBUG(742, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(743, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1244 "Zend/zend_language_scanner.l" { return T_EVAL; } #line 6861 "Zend/zend_language_scanner.c" yy744: YYDEBUG(744, *YYCURSOR); yych = *++YYCURSOR; YYDEBUG(-1, yych); switch (yych) { case 'D': case 'd': goto yy745; case 'F': case 'f': goto yy746; case 'I': case 'i': goto yy747; case 'S': case 's': goto yy748; case 'W': case 'w': goto yy749; default: goto yy186; } yy745: YYDEBUG(745, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy771; if (yych == 'e') goto yy771; goto yy186; yy746: YYDEBUG(746, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy763; if (yych == 'o') goto yy763; goto yy186; yy747: YYDEBUG(747, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'F') goto yy761; if (yych == 'f') goto yy761; goto yy186; yy748: YYDEBUG(748, *YYCURSOR); yych = 
*++YYCURSOR; if (yych == 'W') goto yy755; if (yych == 'w') goto yy755; goto yy186; yy749: YYDEBUG(749, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy750; if (yych != 'h') goto yy186; yy750: YYDEBUG(750, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy751; if (yych != 'i') goto yy186; yy751: YYDEBUG(751, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy752; if (yych != 'l') goto yy186; yy752: YYDEBUG(752, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy753; if (yych != 'e') goto yy186; yy753: YYDEBUG(753, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(754, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1070 "Zend/zend_language_scanner.l" { return T_ENDWHILE; } #line 6935 "Zend/zend_language_scanner.c" yy755: YYDEBUG(755, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'I') goto yy756; if (yych != 'i') goto yy186; yy756: YYDEBUG(756, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy757; if (yych != 't') goto yy186; yy757: YYDEBUG(757, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy758; if (yych != 'c') goto yy186; yy758: YYDEBUG(758, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy759; if (yych != 'h') goto yy186; yy759: YYDEBUG(759, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(760, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1114 "Zend/zend_language_scanner.l" { return T_ENDSWITCH; } #line 6968 "Zend/zend_language_scanner.c" yy761: YYDEBUG(761, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(762, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1058 "Zend/zend_language_scanner.l" { return T_ENDIF; } #line 6981 "Zend/zend_language_scanner.c" yy763: YYDEBUG(763, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy764; if (yych != 'r') goto yy186; yy764: YYDEBUG(764, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '^') { if (yych <= '@') { if (yych <= '/') goto yy765; if (yych <= 
'9') goto yy185; } else { if (yych == 'E') goto yy766; if (yych <= 'Z') goto yy185; } } else { if (yych <= 'd') { if (yych != '`') goto yy185; } else { if (yych <= 'e') goto yy766; if (yych <= 'z') goto yy185; if (yych >= 0x7F) goto yy185; } } yy765: YYDEBUG(765, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1082 "Zend/zend_language_scanner.l" { return T_ENDFOR; } #line 7014 "Zend/zend_language_scanner.c" yy766: YYDEBUG(766, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy767; if (yych != 'a') goto yy186; yy767: YYDEBUG(767, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy768; if (yych != 'c') goto yy186; yy768: YYDEBUG(768, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'H') goto yy769; if (yych != 'h') goto yy186; yy769: YYDEBUG(769, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(770, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1090 "Zend/zend_language_scanner.l" { return T_ENDFOREACH; } #line 7042 "Zend/zend_language_scanner.c" yy771: YYDEBUG(771, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'C') goto yy772; if (yych != 'c') goto yy186; yy772: YYDEBUG(772, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'L') goto yy773; if (yych != 'l') goto yy186; yy773: YYDEBUG(773, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'A') goto yy774; if (yych != 'a') goto yy186; yy774: YYDEBUG(774, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'R') goto yy775; if (yych != 'r') goto yy186; yy775: YYDEBUG(775, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy776; if (yych != 'e') goto yy186; yy776: YYDEBUG(776, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(777, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1098 "Zend/zend_language_scanner.l" { return T_ENDDECLARE; } #line 7080 "Zend/zend_language_scanner.c" yy778: YYDEBUG(778, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'T') goto yy779; if (yych != 't') goto yy186; yy779: YYDEBUG(779, *YYCURSOR); yych = *++YYCURSOR; if (yych == 
'Y') goto yy780; if (yych != 'y') goto yy186; yy780: YYDEBUG(780, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(781, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1284 "Zend/zend_language_scanner.l" { return T_EMPTY; } #line 7103 "Zend/zend_language_scanner.c" yy782: YYDEBUG(782, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'E') goto yy783; if (yych != 'e') goto yy186; yy783: YYDEBUG(783, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '^') { if (yych <= '@') { if (yych <= '/') goto yy784; if (yych <= '9') goto yy185; } else { if (yych == 'I') goto yy785; if (yych <= 'Z') goto yy185; } } else { if (yych <= 'h') { if (yych != '`') goto yy185; } else { if (yych <= 'i') goto yy785; if (yych <= 'z') goto yy185; if (yych >= 0x7F) goto yy185; } } yy784: YYDEBUG(784, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1062 "Zend/zend_language_scanner.l" { return T_ELSE; } #line 7136 "Zend/zend_language_scanner.c" yy785: YYDEBUG(785, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'F') goto yy786; if (yych != 'f') goto yy186; yy786: YYDEBUG(786, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(787, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1054 "Zend/zend_language_scanner.l" { return T_ELSEIF; } #line 7154 "Zend/zend_language_scanner.c" yy788: YYDEBUG(788, *YYCURSOR); yych = *++YYCURSOR; if (yych == 'O') goto yy789; if (yych != 'o') goto yy186; yy789: YYDEBUG(789, *YYCURSOR); ++YYCURSOR; if (yybm[0+(yych = *YYCURSOR)] & 4) { goto yy185; } YYDEBUG(790, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1138 "Zend/zend_language_scanner.l" { return T_ECHO; } #line 7172 "Zend/zend_language_scanner.c" } /* *********************************** */ yyc_ST_LOOKING_FOR_PROPERTY: { static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 64, 64, 64, 64, 64, 64, 
64, 64, 64, 0, 0, 0, 0, 0, 0, 0, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 0, 0, 0, 0, 64, 0, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 0, 0, 0, 0, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, }; YYDEBUG(791, *YYCURSOR); YYFILL(2); yych = *YYCURSOR; if (yych <= '-') { if (yych <= '\r') { if (yych <= 0x08) goto yy799; if (yych <= '\n') goto yy793; if (yych <= '\f') goto yy799; } else { if (yych == ' ') goto yy793; if (yych <= ',') goto yy799; goto yy795; } } else { if (yych <= '_') { if (yych <= '@') goto yy799; if (yych <= 'Z') goto yy797; if (yych <= '^') goto yy799; goto yy797; } else { if (yych <= '`') goto yy799; if (yych <= 'z') goto yy797; if (yych <= '~') goto yy799; goto yy797; } } yy793: YYDEBUG(793, *YYCURSOR); ++YYCURSOR; yych = *YYCURSOR; goto yy805; yy794: YYDEBUG(794, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1171 "Zend/zend_language_scanner.l" { zendlval->value.str.val = yytext; /* no copying - intentional */ zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; HANDLE_NEWLINES(yytext, yyleng); return T_WHITESPACE; } #line 7253 "Zend/zend_language_scanner.c" yy795: YYDEBUG(795, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) == '>') goto yy802; yy796: YYDEBUG(796, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1190 "Zend/zend_language_scanner.l" { yyless(0); yy_pop_state(TSRMLS_C); goto restart; } #line 7267 "Zend/zend_language_scanner.c" 
yy797: YYDEBUG(797, *YYCURSOR); ++YYCURSOR; yych = *YYCURSOR; goto yy801; yy798: YYDEBUG(798, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1183 "Zend/zend_language_scanner.l" { yy_pop_state(TSRMLS_C); zend_copy_value(zendlval, yytext, yyleng); zendlval->type = IS_STRING; return T_STRING; } #line 7283 "Zend/zend_language_scanner.c" yy799: YYDEBUG(799, *YYCURSOR); yych = *++YYCURSOR; goto yy796; yy800: YYDEBUG(800, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; yy801: YYDEBUG(801, *YYCURSOR); if (yybm[0+yych] & 64) { goto yy800; } goto yy798; yy802: YYDEBUG(802, *YYCURSOR); ++YYCURSOR; YYDEBUG(803, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1179 "Zend/zend_language_scanner.l" { return T_OBJECT_OPERATOR; } #line 7308 "Zend/zend_language_scanner.c" yy804: YYDEBUG(804, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; yy805: YYDEBUG(805, *YYCURSOR); if (yybm[0+yych] & 128) { goto yy804; } goto yy794; } /* *********************************** */ yyc_ST_LOOKING_FOR_VARNAME: { static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 
128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, }; YYDEBUG(806, *YYCURSOR); YYFILL(2); yych = *YYCURSOR; if (yych <= '_') { if (yych <= '@') goto yy810; if (yych <= 'Z') goto yy808; if (yych <= '^') goto yy810; } else { if (yych <= '`') goto yy810; if (yych <= 'z') goto yy808; if (yych <= '~') goto yy810; } yy808: YYDEBUG(808, *YYCURSOR); ++YYCURSOR; yych = *YYCURSOR; goto yy813; yy809: YYDEBUG(809, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1466 "Zend/zend_language_scanner.l" { zend_copy_value(zendlval, yytext, yyleng); zendlval->type = IS_STRING; yy_pop_state(TSRMLS_C); yy_push_state(ST_IN_SCRIPTING TSRMLS_CC); return T_STRING_VARNAME; } #line 7386 "Zend/zend_language_scanner.c" yy810: YYDEBUG(810, *YYCURSOR); ++YYCURSOR; YYDEBUG(811, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1475 "Zend/zend_language_scanner.l" { yyless(0); yy_pop_state(TSRMLS_C); yy_push_state(ST_IN_SCRIPTING TSRMLS_CC); goto restart; } #line 7399 "Zend/zend_language_scanner.c" yy812: YYDEBUG(812, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; yy813: YYDEBUG(813, *YYCURSOR); if (yybm[0+yych] & 128) { goto yy812; } goto yy809; } /* *********************************** */ yyc_ST_NOWDOC: YYDEBUG(814, *YYCURSOR); YYFILL(1); yych = *YYCURSOR; YYDEBUG(816, *YYCURSOR); ++YYCURSOR; YYDEBUG(817, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2357 "Zend/zend_language_scanner.l" { int newline = 0; if (YYCURSOR > YYLIMIT) { return 0; } YYCURSOR--; while (YYCURSOR < YYLIMIT) { switch (*YYCURSOR++) { case '\r': if (*YYCURSOR == '\n') { YYCURSOR++; } /* fall through */ case '\n': /* Check for ending label on the next line */ if (IS_LABEL_START(*YYCURSOR) && CG(heredoc_len) < YYLIMIT - YYCURSOR && !memcmp(YYCURSOR, CG(heredoc), CG(heredoc_len))) { YYCTYPE *end = YYCURSOR + CG(heredoc_len); if (*end == ';') { end++; } if (*end == 
'\n' || *end == '\r') { /* newline before label will be subtracted from returned text, but * yyleng/yytext will include it, for zend_highlight/strip, tokenizer, etc. */ if (YYCURSOR[-2] == '\r' && YYCURSOR[-1] == '\n') { newline = 2; /* Windows newline */ } else { newline = 1; } CG(increment_lineno) = 1; /* For newline before label */ BEGIN(ST_END_HEREDOC); goto nowdoc_scan_done; } } /* fall through */ default: continue; } } nowdoc_scan_done: yyleng = YYCURSOR - SCNG(yy_text); zend_copy_value(zendlval, yytext, yyleng - newline); zendlval->type = IS_STRING; HANDLE_NEWLINES(yytext, yyleng - newline); return T_ENCAPSED_AND_WHITESPACE; } #line 7476 "Zend/zend_language_scanner.c" /* *********************************** */ yyc_ST_VAR_OFFSET: { static const unsigned char yybm[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 240, 240, 112, 112, 112, 112, 112, 112, 112, 112, 0, 0, 0, 0, 0, 0, 0, 80, 80, 80, 80, 80, 80, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 0, 0, 0, 0, 16, 0, 80, 80, 80, 80, 80, 80, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, }; YYDEBUG(818, *YYCURSOR); YYFILL(3); yych = *YYCURSOR; if (yych <= '/') { if (yych <= ' ') { if (yych <= '\f') { if (yych <= 0x08) goto yy832; if (yych <= '\n') goto yy828; goto yy832; } else { if (yych <= '\r') goto yy828; if (yych <= 0x1F) 
goto yy832; goto yy828; } } else { if (yych <= '$') { if (yych <= '"') goto yy827; if (yych <= '#') goto yy828; goto yy823; } else { if (yych == '\'') goto yy828; goto yy827; } } } else { if (yych <= '\\') { if (yych <= '@') { if (yych <= '0') goto yy820; if (yych <= '9') goto yy822; goto yy827; } else { if (yych <= 'Z') goto yy830; if (yych <= '[') goto yy827; goto yy828; } } else { if (yych <= '_') { if (yych <= ']') goto yy825; if (yych <= '^') goto yy827; goto yy830; } else { if (yych <= '`') goto yy827; if (yych <= 'z') goto yy830; if (yych <= '~') goto yy827; goto yy830; } } } yy820: YYDEBUG(820, *YYCURSOR); yyaccept = 0; yych = *(YYMARKER = ++YYCURSOR); if (yych <= 'W') { if (yych <= '9') { if (yych >= '0') goto yy844; } else { if (yych == 'B') goto yy841; } } else { if (yych <= 'b') { if (yych <= 'X') goto yy843; if (yych >= 'b') goto yy841; } else { if (yych == 'x') goto yy843; } } yy821: YYDEBUG(821, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1553 "Zend/zend_language_scanner.l" { /* Offset could be treated as a long */ if (yyleng < MAX_LENGTH_OF_LONG - 1 || (yyleng == MAX_LENGTH_OF_LONG - 1 && strcmp(yytext, long_min_digits) < 0)) { zendlval->value.lval = strtol(yytext, NULL, 10); zendlval->type = IS_LONG; } else { zendlval->value.str.val = (char *)estrndup(yytext, yyleng); zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; } return T_NUM_STRING; } #line 7595 "Zend/zend_language_scanner.c" yy822: YYDEBUG(822, *YYCURSOR); yych = *++YYCURSOR; goto yy840; yy823: YYDEBUG(823, *YYCURSOR); ++YYCURSOR; if ((yych = *YYCURSOR) <= '_') { if (yych <= '@') goto yy824; if (yych <= 'Z') goto yy836; if (yych >= '_') goto yy836; } else { if (yych <= '`') goto yy824; if (yych <= 'z') goto yy836; if (yych >= 0x7F) goto yy836; } yy824: YYDEBUG(824, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1885 "Zend/zend_language_scanner.l" { /* Only '[' can be valid, but returning other tokens will allow a more explicit parse error */ return yytext[0]; } 
#line 7620 "Zend/zend_language_scanner.c" yy825: YYDEBUG(825, *YYCURSOR); ++YYCURSOR; YYDEBUG(826, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1880 "Zend/zend_language_scanner.l" { yy_pop_state(TSRMLS_C); return ']'; } #line 7631 "Zend/zend_language_scanner.c" yy827: YYDEBUG(827, *YYCURSOR); yych = *++YYCURSOR; goto yy824; yy828: YYDEBUG(828, *YYCURSOR); ++YYCURSOR; YYDEBUG(829, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1890 "Zend/zend_language_scanner.l" { /* Invalid rule to return a more explicit parse error with proper line number */ yyless(0); yy_pop_state(TSRMLS_C); return T_ENCAPSED_AND_WHITESPACE; } #line 7648 "Zend/zend_language_scanner.c" yy830: YYDEBUG(830, *YYCURSOR); ++YYCURSOR; yych = *YYCURSOR; goto yy835; yy831: YYDEBUG(831, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1897 "Zend/zend_language_scanner.l" { zend_copy_value(zendlval, yytext, yyleng); zendlval->type = IS_STRING; return T_STRING; } #line 7663 "Zend/zend_language_scanner.c" yy832: YYDEBUG(832, *YYCURSOR); ++YYCURSOR; YYDEBUG(833, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 2413 "Zend/zend_language_scanner.l" { if (YYCURSOR > YYLIMIT) { return 0; } zend_error(E_COMPILE_WARNING,"Unexpected character in input: '%c' (ASCII=%d) state=%d", yytext[0], yytext[0], YYSTATE); goto restart; } #line 7678 "Zend/zend_language_scanner.c" yy834: YYDEBUG(834, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; yy835: YYDEBUG(835, *YYCURSOR); if (yybm[0+yych] & 16) { goto yy834; } goto yy831; yy836: YYDEBUG(836, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(837, *YYCURSOR); if (yych <= '^') { if (yych <= '9') { if (yych >= '0') goto yy836; } else { if (yych <= '@') goto yy838; if (yych <= 'Z') goto yy836; } } else { if (yych <= '`') { if (yych <= '_') goto yy836; } else { if (yych <= 'z') goto yy836; if (yych >= 0x7F) goto yy836; } } yy838: YYDEBUG(838, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1874 "Zend/zend_language_scanner.l" { 
zend_copy_value(zendlval, (yytext+1), (yyleng-1)); zendlval->type = IS_STRING; return T_VARIABLE; } #line 7720 "Zend/zend_language_scanner.c" yy839: YYDEBUG(839, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; yy840: YYDEBUG(840, *YYCURSOR); if (yybm[0+yych] & 32) { goto yy839; } goto yy821; yy841: YYDEBUG(841, *YYCURSOR); yych = *++YYCURSOR; if (yybm[0+yych] & 128) { goto yy849; } yy842: YYDEBUG(842, *YYCURSOR); YYCURSOR = YYMARKER; goto yy821; yy843: YYDEBUG(843, *YYCURSOR); yych = *++YYCURSOR; if (yybm[0+yych] & 64) { goto yy847; } goto yy842; yy844: YYDEBUG(844, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(845, *YYCURSOR); if (yych <= '/') goto yy846; if (yych <= '9') goto yy844; yy846: YYDEBUG(846, *YYCURSOR); yyleng = YYCURSOR - SCNG(yy_text); #line 1565 "Zend/zend_language_scanner.l" { /* Offset must be treated as a string */ zendlval->value.str.val = (char *)estrndup(yytext, yyleng); zendlval->value.str.len = yyleng; zendlval->type = IS_STRING; return T_NUM_STRING; } #line 7767 "Zend/zend_language_scanner.c" yy847: YYDEBUG(847, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(848, *YYCURSOR); if (yybm[0+yych] & 64) { goto yy847; } goto yy846; yy849: YYDEBUG(849, *YYCURSOR); ++YYCURSOR; YYFILL(1); yych = *YYCURSOR; YYDEBUG(850, *YYCURSOR); if (yybm[0+yych] & 128) { goto yy849; } goto yy846; } } #line 2422 "Zend/zend_language_scanner.l" }
./CrossVul/dataset_final_sorted/CWE-20/c/good_5711_2
crossvul-cpp_data_good_3989_1
#include "clar_libgit2.h" #include "checkout_helpers.h" #include "git2/checkout.h" #include "repository.h" #include "buffer.h" #include "futils.h" static const char *repo_name = "nasty"; static git_repository *repo; static git_checkout_options checkout_opts; void test_checkout_nasty__initialize(void) { repo = cl_git_sandbox_init(repo_name); GIT_INIT_STRUCTURE(&checkout_opts, GIT_CHECKOUT_OPTIONS_VERSION); checkout_opts.checkout_strategy = GIT_CHECKOUT_FORCE; } void test_checkout_nasty__cleanup(void) { cl_git_sandbox_cleanup(); } static void test_checkout_passes(const char *refname, const char *filename) { git_oid commit_id; git_commit *commit; git_checkout_options opts = GIT_CHECKOUT_OPTIONS_INIT; git_buf path = GIT_BUF_INIT; cl_git_pass(git_buf_joinpath(&path, repo_name, filename)); cl_git_pass(git_reference_name_to_id(&commit_id, repo, refname)); cl_git_pass(git_commit_lookup(&commit, repo, &commit_id)); opts.checkout_strategy = GIT_CHECKOUT_FORCE | GIT_CHECKOUT_DONT_UPDATE_INDEX; cl_git_pass(git_checkout_tree(repo, (const git_object *)commit, &opts)); cl_assert(!git_path_exists(path.ptr)); git_commit_free(commit); git_buf_dispose(&path); } static void test_checkout_fails(const char *refname, const char *filename) { git_oid commit_id; git_commit *commit; git_checkout_options opts = GIT_CHECKOUT_OPTIONS_INIT; git_buf path = GIT_BUF_INIT; cl_git_pass(git_buf_joinpath(&path, repo_name, filename)); cl_git_pass(git_reference_name_to_id(&commit_id, repo, refname)); cl_git_pass(git_commit_lookup(&commit, repo, &commit_id)); opts.checkout_strategy = GIT_CHECKOUT_FORCE; cl_git_fail(git_checkout_tree(repo, (const git_object *)commit, &opts)); cl_assert(!git_path_exists(path.ptr)); git_commit_free(commit); git_buf_dispose(&path); } /* A tree that contains ".git" as a tree, with a blob inside * (".git/foobar"). 
*/ void test_checkout_nasty__dotgit_tree(void) { test_checkout_fails("refs/heads/dotgit_tree", ".git/foobar"); } /* A tree that contains ".GIT" as a tree, with a blob inside * (".GIT/foobar"). */ void test_checkout_nasty__dotcapitalgit_tree(void) { test_checkout_fails("refs/heads/dotcapitalgit_tree", ".GIT/foobar"); } /* A tree that contains a tree ".", with a blob inside ("./foobar"). */ void test_checkout_nasty__dot_tree(void) { test_checkout_fails("refs/heads/dot_tree", "foobar"); } /* A tree that contains a tree ".", with a tree ".git", with a blob * inside ("./.git/foobar"). */ void test_checkout_nasty__dot_dotgit_tree(void) { test_checkout_fails("refs/heads/dot_dotgit_tree", ".git/foobar"); } /* A tree that contains a tree, with a tree "..", with a tree ".git", with a * blob inside ("foo/../.git/foobar"). */ void test_checkout_nasty__dotdot_dotgit_tree(void) { test_checkout_fails("refs/heads/dotdot_dotgit_tree", ".git/foobar"); } /* A tree that contains a tree, with a tree "..", with a blob inside * ("foo/../foobar"). 
*/ void test_checkout_nasty__dotdot_tree(void) { test_checkout_fails("refs/heads/dotdot_tree", "foobar"); } /* A tree that contains a blob with the rogue name ".git/foobar" */ void test_checkout_nasty__dotgit_path(void) { test_checkout_fails("refs/heads/dotgit_path", ".git/foobar"); } /* A tree that contains a blob with the rogue name ".GIT/foobar" */ void test_checkout_nasty__dotcapitalgit_path(void) { test_checkout_fails("refs/heads/dotcapitalgit_path", ".GIT/foobar"); } /* A tree that contains a blob with the rogue name "./.git/foobar" */ void test_checkout_nasty__dot_dotgit_path(void) { test_checkout_fails("refs/heads/dot_dotgit_path", ".git/foobar"); } /* A tree that contains a blob with the rogue name "./.GIT/foobar" */ void test_checkout_nasty__dot_dotcapitalgit_path(void) { test_checkout_fails("refs/heads/dot_dotcapitalgit_path", ".GIT/foobar"); } /* A tree that contains a blob with the rogue name "foo/../.git/foobar" */ void test_checkout_nasty__dotdot_dotgit_path(void) { test_checkout_fails("refs/heads/dotdot_dotgit_path", ".git/foobar"); } /* A tree that contains a blob with the rogue name "foo/../.GIT/foobar" */ void test_checkout_nasty__dotdot_dotcapitalgit_path(void) { test_checkout_fails("refs/heads/dotdot_dotcapitalgit_path", ".GIT/foobar"); } /* A tree that contains a blob with the rogue name "foo/." */ void test_checkout_nasty__dot_path(void) { test_checkout_fails("refs/heads/dot_path", "./foobar"); } /* A tree that contains a blob with the rogue name "foo/." 
*/ void test_checkout_nasty__dot_path_two(void) { test_checkout_fails("refs/heads/dot_path_two", "foo/."); } /* A tree that contains a blob with the rogue name "foo/../foobar" */ void test_checkout_nasty__dotdot_path(void) { test_checkout_fails("refs/heads/dotdot_path", "foobar"); } /* A tree that contains an entry with a backslash ".git\foobar" */ void test_checkout_nasty__dotgit_backslash_path(void) { #ifdef GIT_WIN32 test_checkout_fails("refs/heads/dotgit_backslash_path", ".git/foobar"); #endif } /* A tree that contains an entry with a backslash ".GIT\foobar" */ void test_checkout_nasty__dotcapitalgit_backslash_path(void) { #ifdef GIT_WIN32 test_checkout_fails("refs/heads/dotcapitalgit_backslash_path", ".GIT/foobar"); #endif } /* A tree that contains an entry with a backslash ".\.GIT\foobar" */ void test_checkout_nasty__dot_backslash_dotcapitalgit_path(void) { #ifdef GIT_WIN32 test_checkout_fails("refs/heads/dot_backslash_dotcapitalgit_path", ".GIT/foobar"); #endif } /* A tree that contains an entry ".git.", because Win32 APIs will drop the * trailing slash. */ void test_checkout_nasty__dot_git_dot(void) { #ifdef GIT_WIN32 test_checkout_fails("refs/heads/dot_git_dot", ".git/foobar"); #endif } /* A tree that contains an entry "git~1", because that is typically the * short name for ".git". */ void test_checkout_nasty__git_tilde1(void) { test_checkout_fails("refs/heads/git_tilde1", ".git/foobar"); test_checkout_fails("refs/heads/git_tilde1", "git~1/foobar"); } /* A tree that contains an entry "git~2", when we have forced the short * name for ".git" into "GIT~2". 
*/ void test_checkout_nasty__git_custom_shortname(void) { #ifdef GIT_WIN32 if (!cl_sandbox_supports_8dot3()) clar__skip(); cl_must_pass(p_rename("nasty/.git", "nasty/_temp")); cl_git_write2file("nasty/git~1", "", 0, O_RDWR|O_CREAT, 0666); cl_must_pass(p_rename("nasty/_temp", "nasty/.git")); test_checkout_fails("refs/heads/git_tilde2", ".git/foobar"); #endif } /* A tree that contains an entry "git~3", which should be allowed, since * it is not the typical short name ("GIT~1") or the actual short name * ("GIT~2") for ".git". */ void test_checkout_nasty__only_looks_like_a_git_shortname(void) { #ifdef GIT_WIN32 git_oid commit_id; git_commit *commit; git_checkout_options opts = GIT_CHECKOUT_OPTIONS_INIT; cl_must_pass(p_rename("nasty/.git", "nasty/_temp")); cl_git_write2file("nasty/git~1", "", 0, O_RDWR|O_CREAT, 0666); cl_must_pass(p_rename("nasty/_temp", "nasty/.git")); cl_git_pass(git_reference_name_to_id(&commit_id, repo, "refs/heads/git_tilde3")); cl_git_pass(git_commit_lookup(&commit, repo, &commit_id)); opts.checkout_strategy = GIT_CHECKOUT_FORCE; cl_git_pass(git_checkout_tree(repo, (const git_object *)commit, &opts)); cl_assert(git_path_exists("nasty/git~3/foobar")); git_commit_free(commit); #endif } /* A tree that contains an entry "git:", because Win32 APIs will reject * that as looking too similar to a drive letter. */ void test_checkout_nasty__dot_git_colon(void) { #ifdef GIT_WIN32 test_checkout_fails("refs/heads/dot_git_colon", ".git/foobar"); #endif } /* A tree that contains an entry "git:foo", because Win32 APIs will turn * that into ".git". 
*/ void test_checkout_nasty__dot_git_colon_stuff(void) { #ifdef GIT_WIN32 test_checkout_fails("refs/heads/dot_git_colon_stuff", ".git/foobar"); #endif } /* Trees that contains entries with a tree ".git" that contain * byte sequences: * { 0xe2, 0x80, 0x8c } * { 0xe2, 0x80, 0x8d } * { 0xe2, 0x80, 0x8e } * { 0xe2, 0x80, 0x8f } * { 0xe2, 0x80, 0xaa } * { 0xe2, 0x80, 0xab } * { 0xe2, 0x80, 0xac } * { 0xe2, 0x80, 0xad } * { 0xe2, 0x81, 0xae } * { 0xe2, 0x81, 0xaa } * { 0xe2, 0x81, 0xab } * { 0xe2, 0x81, 0xac } * { 0xe2, 0x81, 0xad } * { 0xe2, 0x81, 0xae } * { 0xe2, 0x81, 0xaf } * { 0xef, 0xbb, 0xbf } * Because these map to characters that HFS filesystems "ignore". Thus * ".git<U+200C>" will map to ".git". */ void test_checkout_nasty__dot_git_hfs_ignorable(void) { #ifdef __APPLE__ test_checkout_fails("refs/heads/dotgit_hfs_ignorable_1", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_2", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_3", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_4", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_5", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_6", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_7", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_8", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_9", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_10", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_11", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_12", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_13", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_14", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_15", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_16", ".git/foobar"); #endif } void 
test_checkout_nasty__honors_core_protecthfs(void) { cl_repo_set_bool(repo, "core.protectHFS", true); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_1", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_2", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_3", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_4", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_5", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_6", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_7", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_8", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_9", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_10", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_11", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_12", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_13", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_14", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_15", ".git/foobar"); test_checkout_fails("refs/heads/dotgit_hfs_ignorable_16", ".git/foobar"); } void test_checkout_nasty__honors_core_protectntfs(void) { cl_repo_set_bool(repo, "core.protectNTFS", true); test_checkout_fails("refs/heads/dotgit_backslash_path", ".git/foobar"); test_checkout_fails("refs/heads/dotcapitalgit_backslash_path", ".GIT/foobar"); test_checkout_fails("refs/heads/dot_git_dot", ".git/foobar"); test_checkout_fails("refs/heads/git_tilde1", ".git/foobar"); } void test_checkout_nasty__symlink1(void) { test_checkout_passes("refs/heads/symlink1", ".git/foobar"); } void test_checkout_nasty__symlink2(void) { test_checkout_passes("refs/heads/symlink2", ".git/foobar"); } void test_checkout_nasty__symlink3(void) { test_checkout_passes("refs/heads/symlink3", ".git/foobar"); } void 
test_checkout_nasty__gitmodules_symlink(void) { cl_repo_set_bool(repo, "core.protectHFS", true); test_checkout_fails("refs/heads/gitmodules-symlink", ".gitmodules"); cl_repo_set_bool(repo, "core.protectHFS", false); cl_repo_set_bool(repo, "core.protectNTFS", true); test_checkout_fails("refs/heads/gitmodules-symlink", ".gitmodules"); cl_repo_set_bool(repo, "core.protectNTFS", false); test_checkout_fails("refs/heads/gitmodules-symlink", ".gitmodules"); }
./CrossVul/dataset_final_sorted/CWE-20/c/good_3989_1
crossvul-cpp_data_bad_2891_6
/* Userspace key control operations * * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/slab.h> #include <linux/syscalls.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/capability.h> #include <linux/cred.h> #include <linux/string.h> #include <linux/err.h> #include <linux/vmalloc.h> #include <linux/security.h> #include <linux/uio.h> #include <linux/uaccess.h> #include "internal.h" #define KEY_MAX_DESC_SIZE 4096 static int key_get_type_from_user(char *type, const char __user *_type, unsigned len) { int ret; ret = strncpy_from_user(type, _type, len); if (ret < 0) return ret; if (ret == 0 || ret >= len) return -EINVAL; if (type[0] == '.') return -EPERM; type[len - 1] = '\0'; return 0; } /* * Extract the description of a new key from userspace and either add it as a * new key to the specified keyring or update a matching key in that keyring. * * If the description is NULL or an empty string, the key type is asked to * generate one from the payload. * * The keyring must be writable so that we can attach the key to it. * * If successful, the new key's serial number is returned, otherwise an error * code is returned. 
*/ SYSCALL_DEFINE5(add_key, const char __user *, _type, const char __user *, _description, const void __user *, _payload, size_t, plen, key_serial_t, ringid) { key_ref_t keyring_ref, key_ref; char type[32], *description; void *payload; long ret; ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; /* draw all the data into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; description = NULL; if (_description) { description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } if (!*description) { kfree(description); description = NULL; } else if ((description[0] == '.') && (strncmp(type, "keyring", 7) == 0)) { ret = -EPERM; goto error2; } } /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kvmalloc(plen, GFP_KERNEL); if (!payload) goto error2; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error3; } /* find the target keyring (which must be writable) */ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error3; } /* create or update the requested key and add it to the target * keyring */ key_ref = key_create_or_update(keyring_ref, type, description, payload, plen, KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA); if (!IS_ERR(key_ref)) { ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); } else { ret = PTR_ERR(key_ref); } key_ref_put(keyring_ref); error3: if (payload) { memzero_explicit(payload, plen); kvfree(payload); } error2: kfree(description); error: return ret; } /* * Search the process keyrings and keyring trees linked from those for a * matching key. Keyrings must have appropriate Search permission to be * searched. * * If a key is found, it will be attached to the destination keyring if there's * one specified and the serial number of the key will be returned. 
* * If no key is found, /sbin/request-key will be invoked if _callout_info is * non-NULL in an attempt to create a key. The _callout_info string will be * passed to /sbin/request-key to aid with completing the request. If the * _callout_info string is "" then it will be changed to "-". */ SYSCALL_DEFINE4(request_key, const char __user *, _type, const char __user *, _description, const char __user *, _callout_info, key_serial_t, destringid) { struct key_type *ktype; struct key *key; key_ref_t dest_ref; size_t callout_len; char type[32], *description, *callout_info; long ret; /* pull the type into kernel space */ ret = key_get_type_from_user(type, _type, sizeof(type)); if (ret < 0) goto error; /* pull the description into kernel space */ description = strndup_user(_description, KEY_MAX_DESC_SIZE); if (IS_ERR(description)) { ret = PTR_ERR(description); goto error; } /* pull the callout info into kernel space */ callout_info = NULL; callout_len = 0; if (_callout_info) { callout_info = strndup_user(_callout_info, PAGE_SIZE); if (IS_ERR(callout_info)) { ret = PTR_ERR(callout_info); goto error2; } callout_len = strlen(callout_info); } /* get the destination keyring if specified */ dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; } } /* find the key type */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { ret = PTR_ERR(ktype); goto error4; } /* do the search */ key = request_key_and_link(ktype, description, callout_info, callout_len, NULL, key_ref_to_ptr(dest_ref), KEY_ALLOC_IN_QUOTA); if (IS_ERR(key)) { ret = PTR_ERR(key); goto error5; } /* wait for the key to finish being constructed */ ret = wait_for_key_construction(key, 1); if (ret < 0) goto error6; ret = key->serial; error6: key_put(key); error5: key_type_put(ktype); error4: key_ref_put(dest_ref); error3: kfree(callout_info); error2: kfree(description); error: return ret; } /* * Get the ID of 
the specified process keyring. * * The requested keyring must have search permission to be found. * * If successful, the ID of the requested keyring will be returned. */ long keyctl_get_keyring_ID(key_serial_t id, int create) { key_ref_t key_ref; unsigned long lflags; long ret; lflags = create ? KEY_LOOKUP_CREATE : 0; key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } ret = key_ref_to_ptr(key_ref)->serial; key_ref_put(key_ref); error: return ret; } /* * Join a (named) session keyring. * * Create and join an anonymous session keyring or join a named session * keyring, creating it if necessary. A named session keyring must have Search * permission for it to be joined. Session keyrings without this permit will * be skipped over. It is not permitted for userspace to create or join * keyrings whose name begin with a dot. * * If successful, the ID of the joined session keyring will be returned. */ long keyctl_join_session_keyring(const char __user *_name) { char *name; long ret; /* fetch the name from userspace */ name = NULL; if (_name) { name = strndup_user(_name, KEY_MAX_DESC_SIZE); if (IS_ERR(name)) { ret = PTR_ERR(name); goto error; } ret = -EPERM; if (name[0] == '.') goto error_name; } /* join the session */ ret = join_session_keyring(name); error_name: kfree(name); error: return ret; } /* * Update a key's data payload from the given data. * * The key must grant the caller Write permission and the key type must support * updating for this to work. A negative key can be positively instantiated * with this call. * * If successful, 0 will be returned. If the key type does not support * updating, then -EOPNOTSUPP will be returned. 
*/ long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) { key_ref_t key_ref; void *payload; long ret; ret = -EINVAL; if (plen > PAGE_SIZE) goto error; /* pull the payload in if one was supplied */ payload = NULL; if (plen) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL); if (!payload) goto error; ret = -EFAULT; if (copy_from_user(payload, _payload, plen) != 0) goto error2; } /* find the target key (which must be writable) */ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; } /* update the key */ ret = key_update(key_ref, payload, plen); key_ref_put(key_ref); error2: kzfree(payload); error: return ret; } /* * Revoke a key. * * The key must be grant the caller Write or Setattr permission for this to * work. The key type should give up its quota claim when revoked. The key * and any links to the key will be automatically garbage collected after a * certain amount of time (/proc/sys/kernel/keys/gc_delay). * * Keys with KEY_FLAG_KEEP set should not be revoked. * * If successful, 0 is returned. */ long keyctl_revoke_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); if (ret != -EACCES) goto error; key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; } } key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_revoke(key); key_ref_put(key_ref); error: return ret; } /* * Invalidate a key. * * The key must be grant the caller Invalidate permission for this to work. * The key and any links to the key will be automatically garbage collected * immediately. * * Keys with KEY_FLAG_KEEP set should not be invalidated. * * If successful, 0 is returned. 
*/ long keyctl_invalidate_key(key_serial_t id) { key_ref_t key_ref; struct key *key; long ret; kenter("%d", id); key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); /* Root is permitted to invalidate certain special keys */ if (capable(CAP_SYS_ADMIN)) { key_ref = lookup_user_key(id, 0, 0); if (IS_ERR(key_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_INVAL, &key_ref_to_ptr(key_ref)->flags)) goto invalidate; goto error_put; } goto error; } invalidate: key = key_ref_to_ptr(key_ref); ret = 0; if (test_bit(KEY_FLAG_KEEP, &key->flags)) ret = -EPERM; else key_invalidate(key); error_put: key_ref_put(key_ref); error: kleave(" = %ld", ret); return ret; } /* * Clear the specified keyring, creating an empty process keyring if one of the * special keyring IDs is used. * * The keyring must grant the caller Write permission and not have * KEY_FLAG_KEEP set for this to work. If successful, 0 will be returned. */ long keyctl_keyring_clear(key_serial_t ringid) { key_ref_t keyring_ref; struct key *keyring; long ret; keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); /* Root is permitted to invalidate certain special keyrings */ if (capable(CAP_SYS_ADMIN)) { keyring_ref = lookup_user_key(ringid, 0, 0); if (IS_ERR(keyring_ref)) goto error; if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR, &key_ref_to_ptr(keyring_ref)->flags)) goto clear; goto error_put; } goto error; } clear: keyring = key_ref_to_ptr(keyring_ref); if (test_bit(KEY_FLAG_KEEP, &keyring->flags)) ret = -EPERM; else ret = keyring_clear(keyring); error_put: key_ref_put(keyring_ref); error: return ret; } /* * Create a link from a keyring to a key if there's no matching key in the * keyring, otherwise replace the link to the matching key with a link to the * new key. * * The key must grant the caller Link permission and the the keyring must grant * the caller Write permission. 
Furthermore, if an additional link is created,
 * the keyring's quota will be extended.
 *
 * If successful, 0 will be returned.
 */
long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
{
	key_ref_t keyring_ref, key_ref;
	long ret;

	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error;
	}

	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error2;
	}

	ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));

	key_ref_put(key_ref);
error2:
	key_ref_put(keyring_ref);
error:
	return ret;
}

/*
 * Unlink a key from a keyring.
 *
 * The keyring must grant the caller Write permission for this to work; the key
 * itself need not grant the caller anything.  If the last link to a key is
 * removed then that key will be scheduled for destruction.
 *
 * Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked.
 *
 * If successful, 0 will be returned.
 */
long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
{
	key_ref_t keyring_ref, key_ref;
	struct key *keyring, *key;
	long ret;

	keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error;
	}

	/* no permission is required on the key itself for unlinking */
	key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error2;
	}

	keyring = key_ref_to_ptr(keyring_ref);
	key = key_ref_to_ptr(key_ref);
	/* unlinking is only refused if BOTH the keyring and the key are
	 * marked KEEP */
	if (test_bit(KEY_FLAG_KEEP, &keyring->flags) &&
	    test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;
	else
		ret = key_unlink(keyring, key);

	key_ref_put(key_ref);
error2:
	key_ref_put(keyring_ref);
error:
	return ret;
}

/*
 * Return a description of a key to userspace.
 *
 * The key must grant the caller View permission for this to work.
 *
 * If there's a buffer, we place up to buflen bytes of data into it formatted
 * in the following way:
 *
 *	type;uid;gid;perm;description<NUL>
 *
 * If successful, we return the amount of description available, irrespective
 * of how much we may have copied into the buffer.
 */
long keyctl_describe_key(key_serial_t keyid,
			 char __user *buffer,
			 size_t buflen)
{
	struct key *key, *instkey;
	key_ref_t key_ref;
	char *infobuf;
	long ret;
	int desclen, infolen;

	key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
	if (IS_ERR(key_ref)) {
		/* viewing a key under construction is permitted if we have the
		 * authorisation token handy */
		if (PTR_ERR(key_ref) == -EACCES) {
			instkey = key_get_instantiation_authkey(keyid);
			if (!IS_ERR(instkey)) {
				key_put(instkey);
				key_ref = lookup_user_key(keyid,
							  KEY_LOOKUP_PARTIAL,
							  0);
				if (!IS_ERR(key_ref))
					goto okay;
			}
		}

		ret = PTR_ERR(key_ref);
		goto error;
	}

okay:
	key = key_ref_to_ptr(key_ref);
	desclen = strlen(key->description);

	/* calculate how much information we're going to return */
	ret = -ENOMEM;
	infobuf = kasprintf(GFP_KERNEL,
			    "%s;%d;%d;%08x;",
			    key->type->name,
			    from_kuid_munged(current_user_ns(), key->uid),
			    from_kgid_munged(current_user_ns(), key->gid),
			    key->perm);
	if (!infobuf)
		goto error2;
	infolen = strlen(infobuf);
	/* +1 accounts for the NUL terminator after the description */
	ret = infolen + desclen + 1;

	/* consider returning the data */
	if (buffer && buflen >= ret) {
		if (copy_to_user(buffer, infobuf, infolen) != 0 ||
		    copy_to_user(buffer + infolen, key->description,
				 desclen + 1) != 0)
			ret = -EFAULT;
	}

	kfree(infobuf);
error2:
	key_ref_put(key_ref);
error:
	return ret;
}

/*
 * Search the specified keyring and any keyrings it links to for a matching
 * key.  Only keyrings that grant the caller Search permission will be searched
 * (this includes the starting keyring).  Only keys with Search permission can
 * be found.
 *
 * If successful, the found key will be linked to the destination keyring if
 * supplied and the key has Link permission, and the found key ID will be
 * returned.
 */
long keyctl_keyring_search(key_serial_t ringid,
			   const char __user *_type,
			   const char __user *_description,
			   key_serial_t destringid)
{
	struct key_type *ktype;
	key_ref_t keyring_ref, key_ref, dest_ref;
	char type[32], *description;
	long ret;

	/* pull the type and description into kernel space */
	ret = key_get_type_from_user(type, _type, sizeof(type));
	if (ret < 0)
		goto error;

	description = strndup_user(_description, KEY_MAX_DESC_SIZE);
	if (IS_ERR(description)) {
		ret = PTR_ERR(description);
		goto error;
	}

	/* get the keyring at which to begin the search */
	keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error2;
	}

	/* get the destination keyring if specified */
	dest_ref = NULL;
	if (destringid) {
		dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
					   KEY_NEED_WRITE);
		if (IS_ERR(dest_ref)) {
			ret = PTR_ERR(dest_ref);
			goto error3;
		}
	}

	/* find the key type */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		ret = PTR_ERR(ktype);
		goto error4;
	}

	/* do the search */
	key_ref = keyring_search(keyring_ref, ktype, description);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);

		/* treat lack or presence of a negative key the same */
		if (ret == -EAGAIN)
			ret = -ENOKEY;
		goto error5;
	}

	/* link the resulting key to the destination keyring if we can */
	if (dest_ref) {
		ret = key_permission(key_ref, KEY_NEED_LINK);
		if (ret < 0)
			goto error6;

		ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref));
		if (ret < 0)
			goto error6;
	}

	ret = key_ref_to_ptr(key_ref)->serial;

	/* unwind in strict reverse order of acquisition */
error6:
	key_ref_put(key_ref);
error5:
	key_type_put(ktype);
error4:
	key_ref_put(dest_ref);
error3:
	key_ref_put(keyring_ref);
error2:
	kfree(description);
error:
	return ret;
}

/*
 * Read a key's payload.
 *
 * The key must either grant the caller Read permission, or it must grant the
 * caller Search permission when searched for from the process keyrings.
 *
 * If successful, we place up to buflen bytes of data into the buffer, if one
 * is provided, and return the amount of data that is available in the key,
 * irrespective of how much we copied into the buffer.
 */
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
{
	struct key *key;
	key_ref_t key_ref;
	long ret;

	/* find the key first */
	key_ref = lookup_user_key(keyid, 0, 0);
	if (IS_ERR(key_ref)) {
		ret = -ENOKEY;
		goto error;
	}

	key = key_ref_to_ptr(key_ref);

	/* negatively instantiated keys have no payload to read */
	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
		ret = -ENOKEY;
		goto error2;
	}

	/* see if we can read it directly */
	ret = key_permission(key_ref, KEY_NEED_READ);
	if (ret == 0)
		goto can_read_key;
	if (ret != -EACCES)
		goto error2;

	/* we can't; see if it's searchable from this process's keyrings
	 * - we automatically take account of the fact that it may be
	 *   dangling off an instantiation key
	 */
	if (!is_key_possessed(key_ref)) {
		ret = -EACCES;
		goto error2;
	}

	/* the key is probably readable - now try to read it */
can_read_key:
	ret = -EOPNOTSUPP;
	if (key->type->read) {
		/* Read the data with the semaphore held (since we might sleep)
		 * to protect against the key being updated or revoked.
		 */
		down_read(&key->sem);
		ret = key_validate(key);
		if (ret == 0)
			ret = key->type->read(key, buffer, buflen);
		up_read(&key->sem);
	}

error2:
	key_put(key);
error:
	return ret;
}

/*
 * Change the ownership of a key
 *
 * The key must grant the caller Setattr permission for this to work, though
 * the key need not be fully instantiated yet.  For the UID to be changed, or
 * for the GID to be changed to a group the caller is not a member of, the
 * caller must have sysadmin capability.  If either uid or gid is -1 then that
 * attribute is not changed.
 *
 * If the UID is to be changed, the new user must have sufficient quota to
 * accept the key.  The quota deduction will be removed from the old user to
 * the new user should the attribute be changed.
 *
 * If successful, 0 will be returned.
 */
long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
{
	struct key_user *newowner, *zapowner = NULL;
	struct key *key;
	key_ref_t key_ref;
	long ret;
	kuid_t uid;
	kgid_t gid;

	/* map the userspace IDs into the current user namespace */
	uid = make_kuid(current_user_ns(), user);
	gid = make_kgid(current_user_ns(), group);
	ret = -EINVAL;
	if ((user != (uid_t) -1) && !uid_valid(uid))
		goto error;
	if ((group != (gid_t) -1) && !gid_valid(gid))
		goto error;

	ret = 0;
	if (user == (uid_t) -1 && group == (gid_t) -1)
		goto error;

	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
				  KEY_NEED_SETATTR);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error;
	}

	key = key_ref_to_ptr(key_ref);

	/* make the changes with the locks held to prevent chown/chown races */
	ret = -EACCES;
	down_write(&key->sem);

	if (!capable(CAP_SYS_ADMIN)) {
		/* only the sysadmin can chown a key to some other UID */
		if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
			goto error_put;

		/* only the sysadmin can set the key's GID to a group other
		 * than one of those that the current process subscribes to */
		if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
			goto error_put;
	}

	/* change the UID */
	if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
		ret = -ENOMEM;
		newowner = key_user_lookup(uid);
		if (!newowner)
			goto error_put;

		/* transfer the quota burden to the new user */
		if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
			unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
				key_quota_root_maxkeys : key_quota_maxkeys;
			unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
				key_quota_root_maxbytes : key_quota_maxbytes;

			/* charge the new owner first; the third condition
			 * catches qnbytes wrap-around */
			spin_lock(&newowner->lock);
			if (newowner->qnkeys + 1 >= maxkeys ||
			    newowner->qnbytes + key->quotalen >= maxbytes ||
			    newowner->qnbytes + key->quotalen <
			    newowner->qnbytes)
				goto quota_overrun;

			newowner->qnkeys++;
			newowner->qnbytes += key->quotalen;
			spin_unlock(&newowner->lock);

			/* ...then discharge the old owner under its own lock;
			 * the two locks are never held simultaneously */
			spin_lock(&key->user->lock);
			key->user->qnkeys--;
			key->user->qnbytes -= key->quotalen;
			spin_unlock(&key->user->lock);
		}

		atomic_dec(&key->user->nkeys);
		atomic_inc(&newowner->nkeys);

		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
			atomic_dec(&key->user->nikeys);
			atomic_inc(&newowner->nikeys);
		}

		/* defer dropping the old owner's ref until the sem is
		 * released */
		zapowner = key->user;
		key->user = newowner;
		key->uid = uid;
	}

	/* change the GID */
	if (group != (gid_t) -1)
		key->gid = gid;

	ret = 0;

error_put:
	up_write(&key->sem);
	key_put(key);
	if (zapowner)
		key_user_put(zapowner);
error:
	return ret;

quota_overrun:
	spin_unlock(&newowner->lock);
	zapowner = newowner;
	ret = -EDQUOT;
	goto error_put;
}

/*
 * Change the permission mask on a key.
 *
 * The key must grant the caller Setattr permission for this to work, though
 * the key need not be fully instantiated yet.  If the caller does not have
 * sysadmin capability, it may only change the permission on keys that it owns.
 */
long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
{
	struct key *key;
	key_ref_t key_ref;
	long ret;

	ret = -EINVAL;
	/* reject any bits outside the defined permission mask */
	if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
		goto error;

	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
				  KEY_NEED_SETATTR);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error;
	}

	key = key_ref_to_ptr(key_ref);

	/* make the changes with the locks held to prevent chown/chmod races */
	ret = -EACCES;
	down_write(&key->sem);

	/* if we're not the sysadmin, we can only change a key that we own */
	if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
		key->perm = perm;
		ret = 0;
	}

	up_write(&key->sem);
	key_put(key);
error:
	return ret;
}

/*
 * Get the destination keyring for instantiation and check that the caller has
 * Write permission on it.
 */
static long get_instantiation_keyring(key_serial_t ringid,
				      struct request_key_auth *rka,
				      struct key **_dest_keyring)
{
	key_ref_t dkref;

	*_dest_keyring = NULL;

	/* just return a NULL pointer if we weren't asked to make a link */
	if (ringid == 0)
		return 0;

	/* if a specific keyring is nominated by ID, then use that */
	if (ringid > 0) {
		dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
		if (IS_ERR(dkref))
			return PTR_ERR(dkref);
		*_dest_keyring = key_ref_to_ptr(dkref);
		return 0;
	}

	if (ringid == KEY_SPEC_REQKEY_AUTH_KEY)
		return -EINVAL;

	/* otherwise specify the destination keyring recorded in the
	 * authorisation key (any KEY_SPEC_*_KEYRING) */
	if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) {
		*_dest_keyring = key_get(rka->dest_keyring);
		return 0;
	}

	return -ENOKEY;
}

/*
 * Change the request_key authorisation key on the current process.
 */
static int keyctl_change_reqkey_auth(struct key *key)
{
	struct cred *new;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	key_put(new->request_key_auth);
	new->request_key_auth = key_get(key);

	return commit_creds(new);
}

/*
 * Instantiate a key with the specified payload and link the key into the
 * destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority).  No other permissions are required.
 *
 * If successful, 0 will be returned.
 */
long keyctl_instantiate_key_common(key_serial_t id,
				   struct iov_iter *from,
				   key_serial_t ringid)
{
	const struct cred *cred = current_cred();
	struct request_key_auth *rka;
	struct key *instkey, *dest_keyring;
	size_t plen = from ? iov_iter_count(from) : 0;
	void *payload;
	long ret;

	kenter("%d,,%zu,%d", id, plen, ringid);

	if (!plen)
		from = NULL;

	ret = -EINVAL;
	if (plen > 1024 * 1024 - 1)
		goto error;

	/* the appropriate instantiation authorisation key must have been
	 * assumed before calling this */
	ret = -EPERM;
	instkey = cred->request_key_auth;
	if (!instkey)
		goto error;

	rka = instkey->payload.data[0];
	/* the authority must cover the key nominated by the caller */
	if (rka->target_key->serial != id)
		goto error;

	/* pull the payload in if one was supplied */
	payload = NULL;

	if (from) {
		ret = -ENOMEM;
		payload = kvmalloc(plen, GFP_KERNEL);
		if (!payload)
			goto error;

		ret = -EFAULT;
		if (!copy_from_iter_full(payload, plen, from))
			goto error2;
	}

	/* find the destination keyring amongst those belonging to the
	 * requesting task */
	ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
	if (ret < 0)
		goto error2;

	/* instantiate the key and link it into a keyring */
	ret = key_instantiate_and_link(rka->target_key, payload, plen,
				       dest_keyring, instkey);

	key_put(dest_keyring);

	/* discard the assumed authority if it's just been disabled by
	 * instantiation of the key */
	if (ret == 0)
		keyctl_change_reqkey_auth(NULL);

error2:
	if (payload) {
		/* scrub the payload copy so key material doesn't survive in
		 * freed memory */
		memzero_explicit(payload, plen);
		kvfree(payload);
	}
error:
	return ret;
}

/*
 * Instantiate a key with the specified payload and link the key into the
 * destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority).  No other permissions are required.
 *
 * If successful, 0 will be returned.
 */
long keyctl_instantiate_key(key_serial_t id,
			    const void __user *_payload,
			    size_t plen,
			    key_serial_t ringid)
{
	if (_payload && plen) {
		struct iovec iov;
		struct iov_iter from;
		int ret;

		ret = import_single_range(WRITE, (void __user *)_payload, plen,
					  &iov, &from);
		if (unlikely(ret))
			return ret;

		return keyctl_instantiate_key_common(id, &from, ringid);
	}

	return keyctl_instantiate_key_common(id, NULL, ringid);
}

/*
 * Instantiate a key with the specified multipart payload and link the key into
 * the destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority).  No other permissions are required.
 *
 * If successful, 0 will be returned.
 */
long keyctl_instantiate_key_iov(key_serial_t id,
				const struct iovec __user *_payload_iov,
				unsigned ioc,
				key_serial_t ringid)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter from;
	long ret;

	if (!_payload_iov)
		ioc = 0;

	ret = import_iovec(WRITE, _payload_iov, ioc,
				    ARRAY_SIZE(iovstack), &iov, &from);
	if (ret < 0)
		return ret;
	ret = keyctl_instantiate_key_common(id, &from, ringid);
	/* import_iovec() may have allocated a larger vector than iovstack */
	kfree(iov);
	return ret;
}

/*
 * Negatively instantiate the key with the given timeout (in seconds) and link
 * the key into the destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority).  No other permissions are required.
 *
 * The key and any links to the key will be automatically garbage collected
 * after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by causing
 * them to return -ENOKEY until the negative key expires.
 *
 * If successful, 0 will be returned.
 */
long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
{
	/* negation is rejection with a fixed ENOKEY error code */
	return keyctl_reject_key(id, timeout, ENOKEY, ringid);
}

/*
 * Negatively instantiate the key with the given timeout (in seconds) and error
 * code and link the key into the destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority).  No other permissions are required.
 *
 * The key and any links to the key will be automatically garbage collected
 * after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by causing
 * them to return the specified error code until the negative key expires.
 *
 * If successful, 0 will be returned.
 */
long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
		       key_serial_t ringid)
{
	const struct cred *cred = current_cred();
	struct request_key_auth *rka;
	struct key *instkey, *dest_keyring;
	long ret;

	kenter("%d,%u,%u,%d", id, timeout, error, ringid);

	/* must be a valid error code and mustn't be a kernel special */
	if (error <= 0 ||
	    error >= MAX_ERRNO ||
	    error == ERESTARTSYS ||
	    error == ERESTARTNOINTR ||
	    error == ERESTARTNOHAND ||
	    error == ERESTART_RESTARTBLOCK)
		return -EINVAL;

	/* the appropriate instantiation authorisation key must have been
	 * assumed before calling this */
	ret = -EPERM;
	instkey = cred->request_key_auth;
	if (!instkey)
		goto error;

	rka = instkey->payload.data[0];
	if (rka->target_key->serial != id)
		goto error;

	/* find the destination keyring if present (which must also be
	 * writable) */
	ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
	if (ret < 0)
		goto error;

	/* instantiate the key and link it into a keyring */
	ret = key_reject_and_link(rka->target_key, timeout, error,
				  dest_keyring, instkey);

	key_put(dest_keyring);

	/* discard the assumed authority if it's just been disabled by
	 * instantiation of the key */
	if (ret == 0)
		keyctl_change_reqkey_auth(NULL);

error:
	return ret;
}

/*
 * Read or set the default keyring in which request_key() will cache keys and
 * return the old setting.
 *
 * If a thread or process keyring is specified then it will be created if it
 * doesn't yet exist.  The old setting will be returned if successful.
 */
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
	struct cred *new;
	int ret, old_setting;

	old_setting = current_cred_xxx(jit_keyring);

	if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE)
		return old_setting;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	switch (reqkey_defl) {
	case KEY_REQKEY_DEFL_THREAD_KEYRING:
		ret = install_thread_keyring_to_cred(new);
		if (ret < 0)
			goto error;
		goto set;

	case KEY_REQKEY_DEFL_PROCESS_KEYRING:
		ret = install_process_keyring_to_cred(new);
		if (ret < 0)
			goto error;
		goto set;

	case KEY_REQKEY_DEFL_DEFAULT:
	case KEY_REQKEY_DEFL_SESSION_KEYRING:
	case KEY_REQKEY_DEFL_USER_KEYRING:
	case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
	case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
		goto set;

	case KEY_REQKEY_DEFL_NO_CHANGE:
	case KEY_REQKEY_DEFL_GROUP_KEYRING:
	default:
		ret = -EINVAL;
		goto error;
	}

set:
	new->jit_keyring = reqkey_defl;
	commit_creds(new);
	return old_setting;
error:
	abort_creds(new);
	return ret;
}

/*
 * Set or clear the timeout on a key.
 *
 * Either the key must grant the caller Setattr permission or else the caller
 * must hold an instantiation authorisation token for the key.
 *
 * The timeout is either 0 to clear the timeout, or a number of seconds from
 * the current time.  The key and any links to the key will be automatically
 * garbage collected after the timeout expires.
 *
 * Keys with KEY_FLAG_KEEP set should not be timed out.
 *
 * If successful, 0 is returned.
 */
long keyctl_set_timeout(key_serial_t id, unsigned timeout)
{
	struct key *key, *instkey;
	key_ref_t key_ref;
	long ret;

	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
				  KEY_NEED_SETATTR);
	if (IS_ERR(key_ref)) {
		/* setting the timeout on a key under construction is permitted
		 * if we have the authorisation token handy */
		if (PTR_ERR(key_ref) == -EACCES) {
			instkey = key_get_instantiation_authkey(id);
			if (!IS_ERR(instkey)) {
				key_put(instkey);
				key_ref = lookup_user_key(id,
							  KEY_LOOKUP_PARTIAL,
							  0);
				if (!IS_ERR(key_ref))
					goto okay;
			}
		}

		ret = PTR_ERR(key_ref);
		goto error;
	}

okay:
	key = key_ref_to_ptr(key_ref);
	ret = 0;
	if (test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;
	else
		key_set_timeout(key, timeout);
	key_put(key);

error:
	return ret;
}

/*
 * Assume (or clear) the authority to instantiate the specified key.
 *
 * This sets the authoritative token currently in force for key instantiation.
 * This must be done for a key to be instantiated.  It has the effect of making
 * available all the keys from the caller of the request_key() that created a
 * key to request_key() calls made by the caller of this function.
 *
 * The caller must have the instantiation key in their process keyrings with a
 * Search permission grant available to the caller.
 *
 * If the ID given is 0, then the setting will be cleared and 0 returned.
 *
 * If the ID given has a matching an authorisation key, then that key will be
 * set and its ID will be returned.  The authorisation key can be read to get
 * the callout information passed to request_key().
 */
long keyctl_assume_authority(key_serial_t id)
{
	struct key *authkey;
	long ret;

	/* special key IDs aren't permitted */
	ret = -EINVAL;
	if (id < 0)
		goto error;

	/* we divest ourselves of authority if given an ID of 0 */
	if (id == 0) {
		ret = keyctl_change_reqkey_auth(NULL);
		goto error;
	}

	/* attempt to assume the authority temporarily granted to us whilst we
	 * instantiate the specified key
	 * - the authorisation key must be in the current task's keyrings
	 *   somewhere
	 */
	authkey = key_get_instantiation_authkey(id);
	if (IS_ERR(authkey)) {
		ret = PTR_ERR(authkey);
		goto error;
	}

	ret = keyctl_change_reqkey_auth(authkey);
	if (ret == 0)
		/* on success, report the serial of the key whose authority
		 * was assumed */
		ret = authkey->serial;
	key_put(authkey);

error:
	return ret;
}

/*
 * Get a key's LSM security label.
 *
 * The key must grant the caller View permission for this to work.
 *
 * If there's a buffer, then up to buflen bytes of data will be placed into it.
 *
 * If successful, the amount of information available will be returned,
 * irrespective of how much was copied (including the terminal NUL).
 */
long keyctl_get_security(key_serial_t keyid,
			 char __user *buffer,
			 size_t buflen)
{
	struct key *key, *instkey;
	key_ref_t key_ref;
	char *context;
	long ret;

	key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
	if (IS_ERR(key_ref)) {
		if (PTR_ERR(key_ref) != -EACCES)
			return PTR_ERR(key_ref);

		/* viewing a key under construction is also permitted if we
		 * have the authorisation token handy */
		instkey = key_get_instantiation_authkey(keyid);
		if (IS_ERR(instkey))
			return PTR_ERR(instkey);
		key_put(instkey);

		key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0);
		if (IS_ERR(key_ref))
			return PTR_ERR(key_ref);
	}

	key = key_ref_to_ptr(key_ref);
	ret = security_key_getsecurity(key, &context);
	if (ret == 0) {
		/* if no information was returned, give userspace an empty
		 * string */
		ret = 1;
		if (buffer && buflen > 0 &&
		    copy_to_user(buffer, "", 1) != 0)
			ret = -EFAULT;
	} else if (ret > 0) {
		/* return as much data as there's room for */
		if (buffer && buflen > 0) {
			if (buflen > ret)
				buflen = ret;

			if (copy_to_user(buffer, context, buflen) != 0)
				ret = -EFAULT;
		}

		/* context was allocated by the LSM; we own it now */
		kfree(context);
	}

	key_ref_put(key_ref);
	return ret;
}

/*
 * Attempt to install the calling process's session keyring on the process's
 * parent process.
 *
 * The keyring must exist and must grant the caller LINK permission, and the
 * parent process must be single-threaded and must have the same effective
 * ownership as this process and mustn't be SUID/SGID.
 *
 * The keyring will be emplaced on the parent when it next resumes userspace.
 *
 * If successful, 0 will be returned.
 */
long keyctl_session_to_parent(void)
{
	struct task_struct *me, *parent;
	const struct cred *mycred, *pcred;
	struct callback_head *newwork, *oldwork;
	key_ref_t keyring_r;
	struct cred *cred;
	int ret;

	keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK);
	if (IS_ERR(keyring_r))
		return PTR_ERR(keyring_r);

	ret = -ENOMEM;

	/* our parent is going to need a new cred struct, a new tgcred struct
	 * and new security data, so we allocate them here to prevent ENOMEM in
	 * our parent */
	cred = cred_alloc_blank();
	if (!cred)
		goto error_keyring;
	newwork = &cred->rcu;

	/* the new cred takes over our reference on the keyring */
	cred->session_keyring = key_ref_to_ptr(keyring_r);
	keyring_r = NULL;
	init_task_work(newwork, key_change_session_keyring);

	me = current;
	rcu_read_lock();
	write_lock_irq(&tasklist_lock);

	ret = -EPERM;
	oldwork = NULL;
	parent = me->real_parent;

	/* the parent mustn't be init and mustn't be a kernel thread */
	if (parent->pid <= 1 || !parent->mm)
		goto unlock;

	/* the parent must be single threaded */
	if (!thread_group_empty(parent))
		goto unlock;

	/* the parent and the child must have different session keyrings or
	 * there's no point */
	mycred = current_cred();
	pcred = __task_cred(parent);
	if (mycred == pcred ||
	    mycred->session_keyring == pcred->session_keyring) {
		ret = 0;
		goto unlock;
	}

	/* the parent must have the same effective ownership and mustn't be
	 * SUID/SGID */
	if (!uid_eq(pcred->uid,	 mycred->euid) ||
	    !uid_eq(pcred->euid, mycred->euid) ||
	    !uid_eq(pcred->suid, mycred->euid) ||
	    !gid_eq(pcred->gid,	 mycred->egid) ||
	    !gid_eq(pcred->egid, mycred->egid) ||
	    !gid_eq(pcred->sgid, mycred->egid))
		goto unlock;

	/* the keyrings must have the same UID */
	if ((pcred->session_keyring &&
	     !uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
	    !uid_eq(mycred->session_keyring->uid, mycred->euid))
		goto unlock;

	/* cancel an already pending keyring replacement */
	oldwork = task_work_cancel(parent, key_change_session_keyring);

	/* the replacement session keyring is applied just prior to userspace
	 * restarting */
	ret = task_work_add(parent, newwork, true);
	if (!ret)
		newwork = NULL;
unlock:
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	/* a cancelled pending work item still owns a cred reference */
	if (oldwork)
		put_cred(container_of(oldwork, struct cred, rcu));
	/* if the work wasn't queued we still own the new cred */
	if (newwork)
		put_cred(cred);
	return ret;

error_keyring:
	key_ref_put(keyring_r);
	return ret;
}

/*
 * Apply a restriction to a given keyring.
 *
 * The caller must have Setattr permission to change keyring restrictions.
 *
 * The requested type name may be a NULL pointer to reject all attempts
 * to link to the keyring.  If _type is non-NULL, _restriction can be
 * NULL or a pointer to a string describing the restriction.  If _type is
 * NULL, _restriction must also be NULL.
 *
 * Returns 0 if successful.
 */
long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
			     const char __user *_restriction)
{
	key_ref_t key_ref;
	bool link_reject = !_type;
	char type[32];
	char *restriction = NULL;
	long ret;

	key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
	if (IS_ERR(key_ref))
		return PTR_ERR(key_ref);

	if (_type) {
		ret = key_get_type_from_user(type, _type, sizeof(type));
		if (ret < 0)
			goto error;
	}

	if (_restriction) {
		/* a restriction string without a type makes no sense */
		if (!_type) {
			ret = -EINVAL;
			goto error;
		}

		restriction = strndup_user(_restriction, PAGE_SIZE);
		if (IS_ERR(restriction)) {
			ret = PTR_ERR(restriction);
			goto error;
		}
	}

	ret = keyring_restrict(key_ref, link_reject ? NULL : type, restriction);
	kfree(restriction);

error:
	key_ref_put(key_ref);
	return ret;
}

/*
 * The key control system call
 */
SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	switch (option) {
	case KEYCTL_GET_KEYRING_ID:
		return keyctl_get_keyring_ID((key_serial_t) arg2,
					     (int) arg3);

	case KEYCTL_JOIN_SESSION_KEYRING:
		return keyctl_join_session_keyring((const char __user *) arg2);

	case KEYCTL_UPDATE:
		return keyctl_update_key((key_serial_t) arg2,
					 (const void __user *) arg3,
					 (size_t) arg4);

	case KEYCTL_REVOKE:
		return keyctl_revoke_key((key_serial_t) arg2);

	case KEYCTL_DESCRIBE:
		return keyctl_describe_key((key_serial_t) arg2,
					   (char __user *) arg3,
					   (unsigned) arg4);

	case KEYCTL_CLEAR:
		return keyctl_keyring_clear((key_serial_t) arg2);

	case KEYCTL_LINK:
		return keyctl_keyring_link((key_serial_t) arg2,
					   (key_serial_t) arg3);

	case KEYCTL_UNLINK:
		return keyctl_keyring_unlink((key_serial_t) arg2,
					     (key_serial_t) arg3);

	case KEYCTL_SEARCH:
		return keyctl_keyring_search((key_serial_t) arg2,
					     (const char __user *) arg3,
					     (const char __user *) arg4,
					     (key_serial_t) arg5);

	case KEYCTL_READ:
		return keyctl_read_key((key_serial_t) arg2,
				       (char __user *) arg3,
				       (size_t) arg4);

	case KEYCTL_CHOWN:
		return keyctl_chown_key((key_serial_t) arg2,
					(uid_t) arg3,
					(gid_t) arg4);

	case KEYCTL_SETPERM:
		return keyctl_setperm_key((key_serial_t) arg2,
					  (key_perm_t) arg3);

	case KEYCTL_INSTANTIATE:
		return keyctl_instantiate_key((key_serial_t) arg2,
					      (const void __user *) arg3,
					      (size_t) arg4,
					      (key_serial_t) arg5);

	case KEYCTL_NEGATE:
		return keyctl_negate_key((key_serial_t) arg2,
					 (unsigned) arg3,
					 (key_serial_t) arg4);

	case KEYCTL_SET_REQKEY_KEYRING:
		return keyctl_set_reqkey_keyring(arg2);

	case KEYCTL_SET_TIMEOUT:
		return keyctl_set_timeout((key_serial_t) arg2,
					  (unsigned) arg3);

	case KEYCTL_ASSUME_AUTHORITY:
		return keyctl_assume_authority((key_serial_t) arg2);

	case KEYCTL_GET_SECURITY:
		return keyctl_get_security((key_serial_t) arg2,
					   (char __user *) arg3,
					   (size_t) arg4);

	case KEYCTL_SESSION_TO_PARENT:
		return keyctl_session_to_parent();

	case KEYCTL_REJECT:
		return keyctl_reject_key((key_serial_t) arg2,
					 (unsigned) arg3,
					 (unsigned) arg4,
					 (key_serial_t) arg5);

	case KEYCTL_INSTANTIATE_IOV:
		return keyctl_instantiate_key_iov(
			(key_serial_t) arg2,
			(const struct iovec __user *) arg3,
			(unsigned) arg4,
			(key_serial_t) arg5);

	case KEYCTL_INVALIDATE:
		return keyctl_invalidate_key((key_serial_t) arg2);

	case KEYCTL_GET_PERSISTENT:
		return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);

	case KEYCTL_DH_COMPUTE:
		return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
					 (char __user *) arg3, (size_t) arg4,
					 (struct keyctl_kdf_params __user *) arg5);

	case KEYCTL_RESTRICT_KEYRING:
		return keyctl_restrict_keyring((key_serial_t) arg2,
					       (const char __user *) arg3,
					       (const char __user *) arg4);

	default:
		return -EOPNOTSUPP;
	}
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2891_6
crossvul-cpp_data_good_1837_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % H H DDDD RRRR % % H H D D R R % % HHHHH D D RRRR % % H H D D R R % % H H DDDD R R % % % % % % Read/Write Radiance RGBE Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" /* Forward declarations. 
*/ static MagickBooleanType WriteHDRImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s H D R % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsHDR() returns MagickTrue if the image format type, identified by the % magick string, is Radiance RGBE image format. % % The format of the IsHDR method is: % % MagickBooleanType IsHDR(const unsigned char *magick, % const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsHDR(const unsigned char *magick, const size_t length) { if (length < 10) return(MagickFalse); if (LocaleNCompare((const char *) magick,"#?RADIANCE",10) == 0) return(MagickTrue); if (LocaleNCompare((const char *) magick,"#?RGBE",6) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d H D R I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadHDRImage() reads the Radiance RGBE image format and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadHDRImage method is: % % Image *ReadHDRImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
%
*/
static Image *ReadHDRImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  char
    format[MaxTextExtent],
    keyword[MaxTextExtent],
    tag[MaxTextExtent],
    value[MaxTextExtent];

  double
    gamma;

  Image
    *image;

  int
    c;

  MagickBooleanType
    status,
    value_expected;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  register unsigned char
    *p;

  ssize_t
    count,
    y;

  unsigned char
    *end,
    pixel[4],
    *pixels;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Decode image header: free-form "keyword=value" lines (plus # comments)
    terminated by the resolution line "-Y <rows> +X <columns>".  The loop
    exits once both dimensions have been parsed or a non-graphic byte is hit.
  */
  image->columns=0;
  image->rows=0;
  *format='\0';
  c=ReadBlobByte(image);
  if (c == EOF)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  while (isgraph(c) && (image->columns == 0) && (image->rows == 0))
  {
    if (c == (int) '#')
      {
        char
          *comment;

        register char
          *p;

        size_t
          length;

        /*
          Read comment-- any text between # and end-of-line.  The buffer is
          grown geometrically; a failed ResizeQuantumMemory leaves comment
          NULL, which is caught after the loop.
        */
        length=MaxTextExtent;
        comment=AcquireString((char *) NULL);
        for (p=comment; comment != (char *) NULL; p++)
        {
          c=ReadBlobByte(image);
          if ((c == EOF) || (c == (int) '\n'))
            break;
          if ((size_t) (p-comment+1) >= length)
            {
              *p='\0';
              length<<=1;
              comment=(char *) ResizeQuantumMemory(comment,length+
                MaxTextExtent,sizeof(*comment));
              if (comment == (char *) NULL)
                break;
              p=comment+strlen(comment);
            }
          *p=(char) c;
        }
        if (comment == (char *) NULL)
          ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
        *p='\0';
        (void) SetImageProperty(image,"comment",comment,exception);
        comment=DestroyString(comment);
        c=ReadBlobByte(image);
      }
    else
      if (isalnum(c) == MagickFalse)
        c=ReadBlobByte(image);
      else
        {
          register char
            *p;

          /*
            Determine a keyword and its value.
          */
          p=keyword;
          do
          {
            if ((size_t) (p-keyword) < (MaxTextExtent-1))
              *p++=c;
            c=ReadBlobByte(image);
          } while (isalnum(c) || (c == '_'));
          *p='\0';
          value_expected=MagickFalse;
          while ((isspace((int) ((unsigned char) c)) != 0) || (c == '='))
          {
            if (c == '=')
              value_expected=MagickTrue;
            c=ReadBlobByte(image);
          }
          /* "Y" introduces the resolution line, which has no '=' sign. */
          if (LocaleCompare(keyword,"Y") == 0)
            value_expected=MagickTrue;
          if (value_expected == MagickFalse)
            continue;
          p=value;
          while ((c != '\n') && (c != '\0') && (c != EOF))
          {
            if ((size_t) (p-value) < (MaxTextExtent-1))
              *p++=c;
            c=ReadBlobByte(image);
          }
          *p='\0';
          /*
            Assign a value to the specified keyword.  Unknown keywords are
            stashed as "hdr:<keyword>" image properties.
          */
          switch (*keyword)
          {
            case 'F':
            case 'f':
            {
              if (LocaleCompare(keyword,"format") == 0)
                {
                  (void) CopyMagickString(format,value,MaxTextExtent);
                  break;
                }
              (void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
              (void) SetImageProperty(image,tag,value,exception);
              break;
            }
            case 'G':
            case 'g':
            {
              if (LocaleCompare(keyword,"gamma") == 0)
                {
                  image->gamma=StringToDouble(value,(char **) NULL);
                  break;
                }
              (void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
              (void) SetImageProperty(image,tag,value,exception);
              break;
            }
            case 'P':
            case 'p':
            {
              if (LocaleCompare(keyword,"primaries") == 0)
                {
                  float
                    chromaticity[6],
                    white_point[2];

                  /* 8 floats: R, G, B primaries then the white point. */
                  if (sscanf(value,"%g %g %g %g %g %g %g %g",&chromaticity[0],
                      &chromaticity[1],&chromaticity[2],&chromaticity[3],
                      &chromaticity[4],&chromaticity[5],&white_point[0],
                      &white_point[1]) == 8)
                    {
                      image->chromaticity.red_primary.x=chromaticity[0];
                      image->chromaticity.red_primary.y=chromaticity[1];
                      image->chromaticity.green_primary.x=chromaticity[2];
                      image->chromaticity.green_primary.y=chromaticity[3];
                      image->chromaticity.blue_primary.x=chromaticity[4];
                      image->chromaticity.blue_primary.y=chromaticity[5];
                      image->chromaticity.white_point.x=white_point[0],
                      image->chromaticity.white_point.y=white_point[1];
                    }
                  break;
                }
              (void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
              (void) SetImageProperty(image,tag,value,exception);
              break;
            }
            case 'Y':
            case 'y':
            {
              char target[] = "Y";

              if (strcmp(keyword,target) == 0)
                {
                  int
                    height,
                    width;

                  /* "-Y <rows> +X <columns>"; leading '-' already consumed. */
                  if (sscanf(value,"%d +X %d",&height,&width) == 2)
                    {
                      image->columns=(size_t) width;
                      image->rows=(size_t) height;
                    }
                  break;
                }
              (void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
              (void) SetImageProperty(image,tag,value,exception);
              break;
            }
            default:
            {
              (void) FormatLocaleString(tag,MaxTextExtent,"hdr:%s",keyword);
              (void) SetImageProperty(image,tag,value,exception);
              break;
            }
          }
        }
    if ((image->columns == 0) && (image->rows == 0))
      while (isspace((int) ((unsigned char) c)) != 0)
        c=ReadBlobByte(image);
  }
  if ((LocaleCompare(format,"32-bit_rle_rgbe") != 0) &&
      (LocaleCompare(format,"32-bit_rle_xyze") != 0))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if ((image->columns == 0) || (image->rows == 0))
    ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
  (void) SetImageColorspace(image,RGBColorspace,exception);
  if (LocaleCompare(format,"32-bit_rle_xyze") == 0)
    (void) SetImageColorspace(image,XYZColorspace,exception);
  /* RLE scanlines are only defined for widths in [8, 0x7fff] per the RGBE
     spec; anything outside is stored flat. */
  image->compression=(image->columns < 8) || (image->columns > 0x7ffff) ?
    NoCompression : RLECompression;
  if (image_info->ping != MagickFalse)
    {
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /*
    Read RGBE (red+green+blue+exponent) pixels.
  */
  pixels=(unsigned char *) AcquireQuantumMemory(image->columns,4*
    sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    if (image->compression != RLECompression)
      {
        count=ReadBlob(image,4*image->columns*sizeof(*pixels),pixels);
        if (count != (ssize_t) (4*image->columns*sizeof(*pixels)))
          break;
      }
    else
      {
        /*
          An RLE scanline starts with a 4-byte header {2,2,hi,lo}; if the
          encoded width does not match, fall back to flat storage for the
          rest of the file.
        */
        count=ReadBlob(image,4*sizeof(*pixel),pixel);
        if (count != 4)
          break;
        if ((size_t) ((((size_t) pixel[2]) << 8) | pixel[3]) != image->columns)
          {
            (void) memcpy(pixels,pixel,4*sizeof(*pixel));
            count=ReadBlob(image,4*(image->columns-1)*sizeof(*pixels),pixels+4);
            image->compression=NoCompression;
          }
        else
          {
            /*
              Decode the four planes (R, G, B, E) in turn; every run/literal
              length is clamped against the remaining plane space (end-p)
              so a corrupt stream cannot overflow the buffer.
            */
            p=pixels;
            for (i=0; i < 4; i++)
            {
              end=&pixels[(i+1)*image->columns];
              while (p < end)
              {
                count=ReadBlob(image,2*sizeof(*pixel),pixel);
                if (count < 1)
                  break;
                if (pixel[0] > 128)
                  {
                    /* Run: repeat pixel[1] (pixel[0]-128) times. */
                    count=(ssize_t) pixel[0]-128;
                    if ((count == 0) || (count > (ssize_t) (end-p)))
                      break;
                    while (count-- > 0)
                      *p++=pixel[1];
                  }
                else
                  {
                    /* Literal: pixel[1] plus (count-1) raw bytes. */
                    count=(ssize_t) pixel[0];
                    if ((count == 0) || (count > (ssize_t) (end-p)))
                      break;
                    *p++=pixel[1];
                    if (--count > 0)
                      {
                        count=ReadBlob(image,(size_t) count*sizeof(*p),p);
                        if (count < 1)
                          break;
                        p+=count;
                      }
                  }
              }
            }
          }
      }
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      break;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* RLE rows are planar (R,G,B,E planes); flat rows are interleaved. */
      if (image->compression == RLECompression)
        {
          pixel[0]=pixels[x];
          pixel[1]=pixels[x+image->columns];
          pixel[2]=pixels[x+2*image->columns];
          pixel[3]=pixels[x+3*image->columns];
        }
      else
        {
          pixel[0]=pixels[i++];
          pixel[1]=pixels[i++];
          pixel[2]=pixels[i++];
          pixel[3]=pixels[i++];
        }
      SetPixelRed(image,0,q);
      SetPixelGreen(image,0,q);
      SetPixelBlue(image,0,q);
      /* Exponent 0 means black; otherwise scale mantissas by 2^(e-136). */
      if (pixel[3] != 0)
        {
          gamma=pow(2.0,pixel[3]-(128.0+8.0));
          SetPixelRed(image,ClampToQuantum(QuantumRange*gamma*pixel[0]),q);
          SetPixelGreen(image,ClampToQuantum(QuantumRange*gamma*pixel[1]),q);
          SetPixelBlue(image,ClampToQuantum(QuantumRange*gamma*pixel[2]),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
      image->rows);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  if (EOFBlob(image) != MagickFalse)
    ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r H D R I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterHDRImage() adds attributes for the Radiance RGBE image format to the
%  list of supported formats.  The attributes include the image format tag, a
%  method to read and/or write the format, whether the format supports the
%  saving of more than one frame to the same file or blob, whether the format
%  supports native in-memory I/O, and a brief description of the format.
%
%  The format of the RegisterHDRImage method is:
%
%      size_t RegisterHDRImage(void)
%
*/
ModuleExport size_t RegisterHDRImage(void)
{
  MagickInfo
    *entry;

  /* Register the "HDR" coder with its read/write/detect handlers. */
  entry=SetMagickInfo("HDR");
  entry->decoder=(DecodeImageHandler *) ReadHDRImage;
  entry->encoder=(EncodeImageHandler *) WriteHDRImage;
  entry->description=ConstantString("Radiance RGBE image format");
  entry->module=ConstantString("HDR");
  entry->magick=(IsImageFormatHandler *) IsHDR;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r H D R I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterHDRImage() removes format registrations made by the
%  HDR module from the list of supported formats.
%
%  The format of the UnregisterHDRImage method is:
%
%      UnregisterHDRImage(void)
%
*/
ModuleExport void UnregisterHDRImage(void)
{
  (void) UnregisterMagickInfo("HDR");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e H D R I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteHDRImage() writes an image in the Radience RGBE image format.
%
%  The format of the WriteHDRImage method is:
%
%      MagickBooleanType WriteHDRImage(const ImageInfo *image_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
% */ static size_t HDRWriteRunlengthPixels(Image *image,unsigned char *pixels) { #define MinimumRunlength 4 register size_t p, q; size_t runlength; ssize_t count, previous_count; unsigned char pixel[2]; for (p=0; p < image->columns; ) { q=p; runlength=0; previous_count=0; while ((runlength < MinimumRunlength) && (q < image->columns)) { q+=runlength; previous_count=(ssize_t) runlength; runlength=1; while ((pixels[q] == pixels[q+runlength]) && ((q+runlength) < image->columns) && (runlength < 127)) runlength++; } if ((previous_count > 1) && (previous_count == (ssize_t) (q-p))) { pixel[0]=(unsigned char) (128+previous_count); pixel[1]=pixels[p]; if (WriteBlob(image,2*sizeof(*pixel),pixel) < 1) break; p=q; } while (p < q) { count=(ssize_t) (q-p); if (count > 128) count=128; pixel[0]=(unsigned char) count; if (WriteBlob(image,sizeof(*pixel),pixel) < 1) break; if (WriteBlob(image,(size_t) count*sizeof(*pixel),&pixels[p]) < 1) break; p+=count; } if (runlength >= MinimumRunlength) { pixel[0]=(unsigned char) (128+runlength); pixel[1]=pixels[q]; if (WriteBlob(image,2*sizeof(*pixel),pixel) < 1) break; p+=runlength; } } return(p); } static MagickBooleanType WriteHDRImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { char header[MaxTextExtent]; const char *property; MagickBooleanType status; register const Quantum *p; register ssize_t i, x; size_t length; ssize_t count, y; unsigned char pixel[4], *pixels; /* Open output image file. 
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  if (IsRGBColorspace(image->colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,RGBColorspace,exception);
  /*
    Write header: magic, optional comment/exposure/gamma, primaries, format
    line, then the "-Y <rows> +X <columns>" resolution line.
  */
  (void) ResetMagickMemory(header,' ',MaxTextExtent);
  length=CopyMagickString(header,"#?RGBE\n",MaxTextExtent);
  (void) WriteBlob(image,length,(unsigned char *) header);
  property=GetImageProperty(image,"comment",exception);
  /* Only single-line comments are representable in the header. */
  if ((property != (const char *) NULL) &&
      (strchr(property,'\n') == (char *) NULL))
    {
      count=FormatLocaleString(header,MaxTextExtent,"#%s\n",property);
      (void) WriteBlob(image,(size_t) count,(unsigned char *) header);
    }
  property=GetImageProperty(image,"hdr:exposure",exception);
  if (property != (const char *) NULL)
    {
      count=FormatLocaleString(header,MaxTextExtent,"EXPOSURE=%g\n",
        strtod(property,(char **) NULL));
      (void) WriteBlob(image,(size_t) count,(unsigned char *) header);
    }
  if (image->gamma != 0.0)
    {
      count=FormatLocaleString(header,MaxTextExtent,"GAMMA=%g\n",image->gamma);
      (void) WriteBlob(image,(size_t) count,(unsigned char *) header);
    }
  count=FormatLocaleString(header,MaxTextExtent,
    "PRIMARIES=%g %g %g %g %g %g %g %g\n",
    image->chromaticity.red_primary.x,image->chromaticity.red_primary.y,
    image->chromaticity.green_primary.x,image->chromaticity.green_primary.y,
    image->chromaticity.blue_primary.x,image->chromaticity.blue_primary.y,
    image->chromaticity.white_point.x,image->chromaticity.white_point.y);
  (void) WriteBlob(image,(size_t) count,(unsigned char *) header);
  length=CopyMagickString(header,"FORMAT=32-bit_rle_rgbe\n\n",MaxTextExtent);
  (void) WriteBlob(image,length,(unsigned char *) header);
  count=FormatLocaleString(header,MaxTextExtent,"-Y %.20g +X %.20g\n",
    (double) image->rows,(double) image->columns);
  (void) WriteBlob(image,(size_t) count,(unsigned char *) header);
  /*
    Write HDR pixels.  Rows whose width lies in [8, 0x7ffff] are written
    RLE-compressed in planar order (R, G, B, E); other widths are written
    flat/interleaved.
  */
  pixels=(unsigned char *) AcquireQuantumMemory(image->columns,4*
    sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if ((image->columns >= 8) && (image->columns <= 0x7ffff))
      {
        /* RLE scanline header: {2, 2, width_hi, width_lo}. */
        pixel[0]=2;
        pixel[1]=2;
        pixel[2]=(unsigned char) (image->columns >> 8);
        pixel[3]=(unsigned char) (image->columns & 0xff);
        count=WriteBlob(image,4*sizeof(*pixel),pixel);
        if (count != (ssize_t) (4*sizeof(*pixel)))
          break;
      }
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      pixel[0]=0;
      pixel[1]=0;
      pixel[2]=0;
      pixel[3]=0;
      /*
        Convert to shared-exponent RGBE: scale all channels by the exponent
        of the largest component.  Near-black pixels stay all-zero.
      */
      gamma=QuantumScale*GetPixelRed(image,p);
      if ((QuantumScale*GetPixelGreen(image,p)) > gamma)
        gamma=QuantumScale*GetPixelGreen(image,p);
      if ((QuantumScale*GetPixelBlue(image,p)) > gamma)
        gamma=QuantumScale*GetPixelBlue(image,p);
      if (gamma > MagickEpsilon)
        {
          int
            exponent;

          gamma=frexp(gamma,&exponent)*256.0/gamma;
          pixel[0]=(unsigned char) (gamma*QuantumScale*GetPixelRed(image,p));
          pixel[1]=(unsigned char) (gamma*QuantumScale*GetPixelGreen(image,p));
          pixel[2]=(unsigned char) (gamma*QuantumScale*GetPixelBlue(image,p));
          pixel[3]=(unsigned char) (exponent+128);
        }
      if ((image->columns >= 8) && (image->columns <= 0x7ffff))
        {
          /* Planar layout for the RLE encoder. */
          pixels[x]=pixel[0];
          pixels[x+image->columns]=pixel[1];
          pixels[x+2*image->columns]=pixel[2];
          pixels[x+3*image->columns]=pixel[3];
        }
      else
        {
          pixels[i++]=pixel[0];
          pixels[i++]=pixel[1];
          pixels[i++]=pixel[2];
          pixels[i++]=pixel[3];
        }
      p+=GetPixelChannels(image);
    }
    if ((image->columns >= 8) && (image->columns <= 0x7ffff))
      {
        for (i=0; i < 4; i++)
          length=HDRWriteRunlengthPixels(image,&pixels[i*image->columns]);
      }
    else
      {
        count=WriteBlob(image,4*image->columns*sizeof(*pixels),pixels);
        if (count != (ssize_t) (4*image->columns*sizeof(*pixels)))
          break;
      }
    status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
      image->rows);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  (void) CloseBlob(image);
  return(MagickTrue);
}
./CrossVul/dataset_final_sorted/CWE-20/c/good_1837_0
crossvul-cpp_data_bad_5845_24
/*
 * Copyright (C) 2011 Instituto Nokia de Tecnologia
 *
 * Authors:
 *    Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
 *    Lauro Ramos Venancio <lauro.venancio@openbossa.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__

#include <net/tcp_states.h>
#include <linux/nfc.h>
#include <linux/export.h>

#include "nfc.h"

/* Drop all queued tx skbs and clear the tx-work-pending flag, under the
 * write-queue lock so it cannot race with rawsock_sendmsg(). */
static void rawsock_write_queue_purge(struct sock *sk)
{
	pr_debug("sk=%p\n", sk);

	spin_lock_bh(&sk->sk_write_queue.lock);
	__skb_queue_purge(&sk->sk_write_queue);
	nfc_rawsock(sk)->tx_work_scheduled = false;
	spin_unlock_bh(&sk->sk_write_queue.lock);
}

/* Mark the socket dead (err is a negative errno, stored positive in
 * sk_err), wake any waiters, and discard pending tx data. */
static void rawsock_report_error(struct sock *sk, int err)
{
	pr_debug("sk=%p err=%d\n", sk, err);

	sk->sk_shutdown = SHUTDOWN_MASK;
	sk->sk_err = -err;
	sk->sk_error_report(sk);

	rawsock_write_queue_purge(sk);
}

static int rawsock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	pr_debug("sock=%p sk=%p\n", sock, sk);

	if (!sk)
		return 0;

	sock_orphan(sk);
	sock_put(sk);

	return 0;
}

static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
			   int len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr;
	struct nfc_dev *dev;
	int rc = 0;

	pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);

	if (!addr || len < sizeof(struct sockaddr_nfc) ||
	    addr->sa_family != AF_NFC)
		return -EINVAL;

	pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n",
		 addr->dev_idx, addr->target_idx, addr->nfc_protocol);

	lock_sock(sk);

	if (sock->state == SS_CONNECTED) {
		rc = -EISCONN;
		goto error;
	}

	dev = nfc_get_device(addr->dev_idx);
	if (!dev) {
		rc = -ENODEV;
		goto error;
	}

	/* NOTE(review): this arithmetic range check on the user-supplied
	 * target_idx assumes target indices form one contiguous window
	 * [target_next_idx - n_targets, target_next_idx); verify it cannot
	 * accept a stale index after targets are removed. */
	if (addr->target_idx > dev->target_next_idx - 1 ||
	    addr->target_idx < dev->target_next_idx - dev->n_targets) {
		rc = -EINVAL;
		goto error;
	}

	rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
	if (rc)
		goto put_dev;

	nfc_rawsock(sk)->dev = dev;
	nfc_rawsock(sk)->target_idx = addr->target_idx;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;
	sk->sk_state_change(sk);

	release_sock(sk);
	return 0;

put_dev:
	nfc_put_device(dev);
error:
	release_sock(sk);
	return rc;
}

/* Prepend the one-byte NFC framing header (always 0 for raw sockets). */
static int rawsock_add_header(struct sk_buff *skb)
{
	*skb_push(skb, NFC_HEADER_SIZE) = 0;

	return 0;
}

/* Completion callback for nfc_data_exchange(): queue the response on the
 * receive queue and, if more tx data is pending, reschedule the tx work.
 * Drops the socket reference taken by rawsock_tx_work(). */
static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
					   int err)
{
	struct sock *sk = (struct sock *) context;

	BUG_ON(in_irq());

	pr_debug("sk=%p err=%d\n", sk, err);

	if (err)
		goto error;

	err = rawsock_add_header(skb);
	if (err)
		goto error_skb;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		goto error_skb;

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (!skb_queue_empty(&sk->sk_write_queue))
		schedule_work(&nfc_rawsock(sk)->tx_work);
	else
		nfc_rawsock(sk)->tx_work_scheduled = false;
	spin_unlock_bh(&sk->sk_write_queue.lock);

	sock_put(sk);
	return;

error_skb:
	kfree_skb(skb);

error:
	rawsock_report_error(sk, err);
	sock_put(sk);
}

/* Workqueue handler: send the next queued skb to the activated target.
 * Holds a socket reference across the async exchange; released in the
 * completion callback (or here on immediate failure). */
static void rawsock_tx_work(struct work_struct *work)
{
	struct sock *sk = to_rawsock_sk(work);
	struct nfc_dev *dev = nfc_rawsock(sk)->dev;
	u32 target_idx = nfc_rawsock(sk)->target_idx;
	struct sk_buff *skb;
	int rc;

	pr_debug("sk=%p target_idx=%u\n", sk, target_idx);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		rawsock_write_queue_purge(sk);
		return;
	}

	skb = skb_dequeue(&sk->sk_write_queue);

	sock_hold(sk);
	rc = nfc_data_exchange(dev, target_idx, skb,
			       rawsock_data_exchange_complete, sk);
	if (rc) {
		rawsock_report_error(sk, rc);
		sock_put(sk);
	}
}

static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct nfc_dev *dev = nfc_rawsock(sk)->dev;
	struct sk_buff *skb;
	int rc;

	pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len);

	/* Raw NFC sockets are connection-oriented; no per-message address. */
	if (msg->msg_namelen)
		return -EOPNOTSUPP;

	if (sock->state != SS_CONNECTED)
		return -ENOTCONN;

	skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc);
	if (skb == NULL)
		return rc;

	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	if (!nfc_rawsock(sk)->tx_work_scheduled) {
		schedule_work(&nfc_rawsock(sk)->tx_work);
		nfc_rawsock(sk)->tx_work_scheduled = true;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return len;
}

static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied;
	int rc;

	pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);

	skb = skb_recv_datagram(sk, flags, noblock, &rc);
	if (!skb)
		return rc;

	/* No address is ever returned for raw NFC sockets; clearing
	 * msg_namelen keeps the socket layer from copying stale bytes
	 * back to userspace.  NOTE(review): confirm this matches the
	 * kernel-wide msg_name/msg_namelen contract for this tree. */
	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	skb_free_datagram(sk, skb);

	/* GNU "elvis" operator: return rc if nonzero, else bytes copied. */
	return rc ? : copied;
}

static const struct proto_ops rawsock_ops = {
	.family         = PF_NFC,
	.owner          = THIS_MODULE,
	.release        = rawsock_release,
	.bind           = sock_no_bind,
	.connect        = rawsock_connect,
	.socketpair     = sock_no_socketpair,
	.accept         = sock_no_accept,
	.getname        = sock_no_getname,
	.poll           = datagram_poll,
	.ioctl          = sock_no_ioctl,
	.listen         = sock_no_listen,
	.shutdown       = sock_no_shutdown,
	.setsockopt     = sock_no_setsockopt,
	.getsockopt     = sock_no_getsockopt,
	.sendmsg        = rawsock_sendmsg,
	.recvmsg        = rawsock_recvmsg,
	.mmap           = sock_no_mmap,
};

/* Final teardown: deactivate the target and drop the device reference if
 * the socket was connected; a still-alive socket here is a refcount bug. */
static void rawsock_destruct(struct sock *sk)
{
	pr_debug("sk=%p\n", sk);

	if (sk->sk_state == TCP_ESTABLISHED) {
		nfc_deactivate_target(nfc_rawsock(sk)->dev,
				      nfc_rawsock(sk)->target_idx);
		nfc_put_device(nfc_rawsock(sk)->dev);
	}

	skb_queue_purge(&sk->sk_receive_queue);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Freeing alive NFC raw socket %p\n", sk);
		return;
	}
}

static int rawsock_create(struct net *net, struct socket *sock,
			  const struct nfc_protocol *nfc_proto)
{
	struct sock *sk;

	pr_debug("sock=%p\n", sock);

	if (sock->type != SOCK_SEQPACKET)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rawsock_ops;

	sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk->sk_protocol = nfc_proto->id;
	sk->sk_destruct = rawsock_destruct;
	sock->state = SS_UNCONNECTED;

	INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
	nfc_rawsock(sk)->tx_work_scheduled = false;

	return 0;
}

static struct proto rawsock_proto = {
	.name     = "NFC_RAW",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct nfc_rawsock),
};

static const struct nfc_protocol rawsock_nfc_proto = {
	.id	  = NFC_SOCKPROTO_RAW,
	.proto    = &rawsock_proto,
	.owner    = THIS_MODULE,
	.create   = rawsock_create
};

int __init rawsock_init(void)
{
	int rc;

	rc = nfc_proto_register(&rawsock_nfc_proto);

	return rc;
}

void rawsock_exit(void)
{
	nfc_proto_unregister(&rawsock_nfc_proto);
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5845_24
crossvul-cpp_data_bad_2117_1
/* * Copyright (c) 2005 Voltaire Inc. All rights reserved. * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/completion.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/mutex.h> #include <linux/random.h> #include <linux/idr.h> #include <linux/inetdevice.h> #include <linux/slab.h> #include <linux/module.h> #include <net/route.h> #include <net/tcp.h> #include <net/ipv6.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_cm_ib.h> #include <rdma/rdma_netlink.h> #include <rdma/ib.h> #include <rdma/ib_cache.h> #include <rdma/ib_cm.h> #include <rdma/ib_sa.h> #include <rdma/iw_cm.h> MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("Generic RDMA CM Agent"); MODULE_LICENSE("Dual BSD/GPL"); #define CMA_CM_RESPONSE_TIMEOUT 20 #define CMA_MAX_CM_RETRIES 15 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) #define CMA_IBOE_PACKET_LIFETIME 18 static void cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device); static struct ib_client cma_client = { .name = "cma", .add = cma_add_one, .remove = cma_remove_one }; static struct ib_sa_client sa_client; static struct rdma_addr_client addr_client; static LIST_HEAD(dev_list); static LIST_HEAD(listen_any_list); static DEFINE_MUTEX(lock); static struct workqueue_struct *cma_wq; static DEFINE_IDR(tcp_ps); static DEFINE_IDR(udp_ps); static DEFINE_IDR(ipoib_ps); static DEFINE_IDR(ib_ps); struct cma_device { struct list_head list; struct ib_device *device; struct completion comp; atomic_t refcount; struct list_head id_list; }; struct rdma_bind_list { struct idr *ps; struct hlist_head owners; unsigned short port; }; enum { CMA_OPTION_AFONLY, }; /* * Device removal can occur at anytime, so we need extra handling to * serialize notifying the user of device removal with other callbacks. * We do this by disabling removal notification while a callback is in process, * and reporting it after the callback completes. 
*/ struct rdma_id_private { struct rdma_cm_id id; struct rdma_bind_list *bind_list; struct hlist_node node; struct list_head list; /* listen_any_list or cma_device.list */ struct list_head listen_list; /* per device listens */ struct cma_device *cma_dev; struct list_head mc_list; int internal_id; enum rdma_cm_state state; spinlock_t lock; struct mutex qp_mutex; struct completion comp; atomic_t refcount; struct mutex handler_mutex; int backlog; int timeout_ms; struct ib_sa_query *query; int query_id; union { struct ib_cm_id *ib; struct iw_cm_id *iw; } cm_id; u32 seq_num; u32 qkey; u32 qp_num; pid_t owner; u32 options; u8 srq; u8 tos; u8 reuseaddr; u8 afonly; }; struct cma_multicast { struct rdma_id_private *id_priv; union { struct ib_sa_multicast *ib; } multicast; struct list_head list; void *context; struct sockaddr_storage addr; struct kref mcref; }; struct cma_work { struct work_struct work; struct rdma_id_private *id; enum rdma_cm_state old_state; enum rdma_cm_state new_state; struct rdma_cm_event event; }; struct cma_ndev_work { struct work_struct work; struct rdma_id_private *id; struct rdma_cm_event event; }; struct iboe_mcast_work { struct work_struct work; struct rdma_id_private *id; struct cma_multicast *mc; }; union cma_ip_addr { struct in6_addr ip6; struct { __be32 pad[3]; __be32 addr; } ip4; }; struct cma_hdr { u8 cma_version; u8 ip_version; /* IP version: 7:4 */ __be16 port; union cma_ip_addr src_addr; union cma_ip_addr dst_addr; }; #define CMA_VERSION 0x00 static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) { unsigned long flags; int ret; spin_lock_irqsave(&id_priv->lock, flags); ret = (id_priv->state == comp); spin_unlock_irqrestore(&id_priv->lock, flags); return ret; } static int cma_comp_exch(struct rdma_id_private *id_priv, enum rdma_cm_state comp, enum rdma_cm_state exch) { unsigned long flags; int ret; spin_lock_irqsave(&id_priv->lock, flags); if ((ret = (id_priv->state == comp))) id_priv->state = exch; 
spin_unlock_irqrestore(&id_priv->lock, flags); return ret; } static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, enum rdma_cm_state exch) { unsigned long flags; enum rdma_cm_state old; spin_lock_irqsave(&id_priv->lock, flags); old = id_priv->state; id_priv->state = exch; spin_unlock_irqrestore(&id_priv->lock, flags); return old; } static inline u8 cma_get_ip_ver(struct cma_hdr *hdr) { return hdr->ip_version >> 4; } static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver) { hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); } static void cma_attach_to_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev) { atomic_inc(&cma_dev->refcount); id_priv->cma_dev = cma_dev; id_priv->id.device = cma_dev->device; id_priv->id.route.addr.dev_addr.transport = rdma_node_get_transport(cma_dev->device->node_type); list_add_tail(&id_priv->list, &cma_dev->id_list); } static inline void cma_deref_dev(struct cma_device *cma_dev) { if (atomic_dec_and_test(&cma_dev->refcount)) complete(&cma_dev->comp); } static inline void release_mc(struct kref *kref) { struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref); kfree(mc->multicast.ib); kfree(mc); } static void cma_release_dev(struct rdma_id_private *id_priv) { mutex_lock(&lock); list_del(&id_priv->list); cma_deref_dev(id_priv->cma_dev); id_priv->cma_dev = NULL; mutex_unlock(&lock); } static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) { return (struct sockaddr *) &id_priv->id.route.addr.src_addr; } static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) { return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; } static inline unsigned short cma_family(struct rdma_id_private *id_priv) { return id_priv->id.route.addr.src_addr.ss_family; } static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) { struct ib_sa_mcmember_rec rec; int ret = 0; if (id_priv->qkey) { if (qkey && id_priv->qkey != qkey) return -EINVAL; return 0; 
} if (qkey) { id_priv->qkey = qkey; return 0; } switch (id_priv->id.ps) { case RDMA_PS_UDP: case RDMA_PS_IB: id_priv->qkey = RDMA_UDP_QKEY; break; case RDMA_PS_IPOIB: ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, &rec.mgid, &rec); if (!ret) id_priv->qkey = be32_to_cpu(rec.qkey); break; default: break; } return ret; } static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr) { dev_addr->dev_type = ARPHRD_INFINIBAND; rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); } static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) { int ret; if (addr->sa_family != AF_IB) { ret = rdma_translate_ip(addr, dev_addr, NULL); } else { cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); ret = 0; } return ret; } static int cma_acquire_dev(struct rdma_id_private *id_priv, struct rdma_id_private *listen_id_priv) { struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct cma_device *cma_dev; union ib_gid gid, iboe_gid; int ret = -ENODEV; u8 port, found_port; enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ? 
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; if (dev_ll != IB_LINK_LAYER_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) return -EINVAL; mutex_lock(&lock); rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &iboe_gid); memcpy(&gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof gid); if (listen_id_priv && rdma_port_get_link_layer(listen_id_priv->id.device, listen_id_priv->id.port_num) == dev_ll) { cma_dev = listen_id_priv->cma_dev; port = listen_id_priv->id.port_num; if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL); else ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL); if (!ret && (port == found_port)) { id_priv->id.port_num = found_port; goto out; } } list_for_each_entry(cma_dev, &dev_list, list) { for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { if (listen_id_priv && listen_id_priv->cma_dev == cma_dev && listen_id_priv->id.port_num == port) continue; if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) { if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL); else ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL); if (!ret && (port == found_port)) { id_priv->id.port_num = found_port; goto out; } } } } out: if (!ret) cma_attach_to_dev(id_priv, cma_dev); mutex_unlock(&lock); return ret; } /* * Select the source IB device and address to reach the destination IB address. 
 */
static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;
	u16 pkey, index;
	u8 p;
	int i;

	cma_dev = NULL;
	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		if (rdma_node_get_transport(cur_dev->device->node_type) !=
		    RDMA_TRANSPORT_IB)
			continue;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			/* Port must carry the destination pkey. */
			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
				continue;

			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i, &gid); i++) {
				/* Exact GID match wins immediately. */
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
					goto found;
				}

				/* Otherwise remember the first port on the
				 * same subnet as a fallback candidate. */
				if (!cma_dev && (gid.global.subnet_prefix ==
						 dgid->global.subnet_prefix)) {
					cma_dev = cur_dev;
					sgid = gid;
					id_priv->id.port_num = p;
				}
			}
		}
	}

	if (!cma_dev)
		return -ENODEV;

found:
	cma_attach_to_dev(id_priv, cma_dev);
	/* Record the chosen source GID as the id's source address. */
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
	return 0;
}

/* Drop a reference; the final put wakes rdma_destroy_id() waiters. */
static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

/*
 * Take handler_mutex iff the id is still in @state; on a state mismatch
 * the lock is dropped again and -EINVAL returned.  On success the caller
 * owns handler_mutex and must unlock it.
 */
static int cma_disable_callback(struct rdma_id_private *id_priv,
				enum rdma_cm_state state)
{
	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state != state) {
		mutex_unlock(&id_priv->handler_mutex);
		return -EINVAL;
	}
	return 0;
}

/* Allocate and initialize a new RDMA CM id in the IDLE state. */
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);

/* Walk a UD QP through INIT -> RTR -> RTS. */
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

	return ret;
}

/* Move a connected QP to INIT; RTR/RTS happen during connection setup. */
static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

/*
 * Create a QP on @pd and associate it with the cm_id.  The PD must come
 * from the same device the id is bound to.
 */
int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
	else
		ret = cma_init_conn_qp(id_priv, qp);
	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

/* Destroy the id's QP; qp_mutex serializes against QP state changes. */
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

/*
 * Transition the id's QP to RTR.  A no-op (success) when the user manages
 * the QP themselves (id.qp == NULL).  On RoCE ports the source MAC is
 * resolved from the source GID before the modify.
 */
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	union ib_gid sgid;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	if (ret)
		goto out;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   qp_attr.ah_attr.grh.sgid_index, &sgid);
	if (ret)
		goto out;

	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
	    == RDMA_TRANSPORT_IB &&
	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
	    == IB_LINK_LAYER_ETHERNET) {
		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);

		if (ret)
			goto out;
	}
	if (conn_param)
		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

/* Transition the id's QP to RTS (no-op if the user owns the QP). */
static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
	if (ret)
		goto out;

	if (conn_param)
		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

/* Force the id's QP into the error state (no-op if the user owns it). */
static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
	struct ib_qp_attr qp_attr;
	int ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {
		ret = 0;
		goto out;
	}

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
	mutex_unlock(&id_priv->qp_mutex);
	return ret;
}

/*
 * Fill INIT-state QP attributes for an IB id: pkey index, port, and
 * either the qkey (UD) or access flags (connected QPs).
 */
static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int ret;
	u16 pkey;

	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		pkey = ib_addr_get_pkey(dev_addr);
	else
		pkey = 0xffff;	/* RoCE: default/full-membership pkey */

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);
	if (ret)
		return ret;

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);
		if (ret)
			return ret;

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
	} else {
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;
	}
	return 0;
}

/*
 * Public helper: fill QP attributes for the id's current transport and
 * connection state, delegating to the IB or iWARP CM as appropriate.
 */
int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret = 0;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
		else
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
						 qp_attr_mask);

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		} else
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
						 qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

/* True if @addr is the zero/any address for its family. */
static inline int cma_zero_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

/* True if @addr is the loopback address for its family. */
static inline int cma_loopback_addr(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	case AF_INET6:
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
	case AF_IB:
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);
	default:
		return 0;
	}
}

/* "Any" address = wildcard or loopback. */
static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

/* Compare two addresses; returns 0 on equality, non-zero otherwise. */
static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
{
	if (src->sa_family != dst->sa_family)
		return -1;

	switch (src->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
	case AF_INET6:
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
	default:
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);
	}
}

/*
 * Extract the port (network byte order).  For AF_IB the port lives in
 * the low bits of the masked service id.
 */
static __be16 cma_port(struct sockaddr *addr)
{
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
	case AF_INET:
		return ((struct sockaddr_in *) addr)->sin_port;
	case AF_INET6:
		return ((struct sockaddr_in6 *) addr)->sin6_port;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));
	default:
		return 0;
	}
}

/* True if no port is bound. */
static inline int cma_any_port(struct sockaddr *addr)
{
	return !cma_port(addr);
}

/*
 * Populate the new id's src/dst AF_IB addresses from the listen id and
 * the primary path record of an incoming REQ.
 */
static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_sa_path_rec *path)
{
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
	ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->sgid, 16);
	ib->sib_sid = listen_ib->sib_sid;
	ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
	ib->sib_scope_id = listen_ib->sib_scope_id;

	ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
	ib->sib_family = listen_ib->sib_family;
	ib->sib_pkey = path->pkey;
	ib->sib_flowinfo = path->flow_label;
	memcpy(&ib->sib_addr, &path->dgid, 16);
}

/* Populate the new id's IPv4 src/dst addresses from the CMA header. */
static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in *listen4, *ip4;

	listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
	ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
	ip4->sin_port = listen4->sin_port;

	ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
	ip4->sin_family = listen4->sin_family;
	ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
	ip4->sin_port = hdr->port;
}

/* Populate the new id's IPv6 src/dst addresses from the CMA header. */
static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			      struct cma_hdr *hdr)
{
	struct sockaddr_in6 *listen6, *ip6;

	listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
	ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = hdr->dst_addr.ip6;
	ip6->sin6_port = listen6->sin6_port;

	ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
	ip6->sin6_family = listen6->sin6_family;
	ip6->sin6_addr = hdr->src_addr.ip6;
	ip6->sin6_port = hdr->port;
}

/*
 * Derive the new id's addressing from the connection request.  AF_IB
 * requests carry the info in the path record; IP requests carry it in
 * the private-data CMA header.
 *
 * NOTE(review): for the IP case, hdr is taken straight from
 * ib_event->private_data and dereferenced with no visible check that a
 * full struct cma_hdr of remote-supplied private data is actually
 * present/valid here — confirm the CM layer guarantees the minimum
 * private-data length for these events.
 */
static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event)
{
	struct cma_hdr *hdr;

	if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
	    (ib_event->event == IB_CM_REQ_RECEIVED)) {
		cma_save_ib_info(id, listen_id,
				 ib_event->param.req_rcvd.primary_path);
		return 0;
	}

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)
		return -EINVAL;

	switch (cma_get_ip_ver(hdr)) {
	case 4:
		cma_save_ip4_info(id, listen_id, hdr);
		break;
	case 6:
		cma_save_ip6_info(id, listen_id, hdr);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* AF_IB carries no CMA header in private data; IP families do. */
static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
{
	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}

/* Cancel an outstanding SA path query, if one is in flight. */
static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	/*
	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.
	 */
	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		/* Drop 'lock' around rdma_destroy_id: it may sleep/relock. */
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
		mutex_lock(&lock);
	}
	mutex_unlock(&lock);
}

/* Undo whatever asynchronous operation the id has outstanding in @state. */
static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
{
	switch (state) {
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

/*
 * Release the id's port binding; the bind list itself is freed once the
 * last owner leaves.
 */
static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

/* Leave every multicast group the id joined, per link-layer rules. */
static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ib_sa_free_multicast(mc->multicast.ib);
			kfree(mc);
			break;
		case IB_LINK_LAYER_ETHERNET:
			/* Ethernet mc entries are refcounted; kref frees. */
			kref_put(&mc->mcref, release_mc);
			break;
		default:
			break;
		}
	}
}

/*
 * Tear down a cm_id: cancel pending work, wait out running callbacks,
 * destroy the transport CM id, leave multicast groups, release the
 * device and port, then wait for the refcount to drain before freeing.
 */
void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	/*
	 * Wait for any active callback to finish.  New callbacks will find
	 * the id_priv state set to destroying and abort.
	 */
	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);
	}

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

/*
 * Active side received a REP: bring the QP to RTS and send RTU.  On any
 * failure, move the QP to error and reject the connection.
 */
static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(id_priv, NULL);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(id_priv, NULL);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

/* Copy REP parameters into the rdma_cm event delivered to the user. */
static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
				   void *private_data)
{
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;
}

/*
 * IB CM callback for an active/connected id: translate IB CM events into
 * rdma_cm events and dispatch them to the user's handler.  A non-zero
 * return from the user handler destroys the id (and tells the IB CM to
 * destroy its cm_id by returning non-zero here).
 */
static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	int ret = 0;

	/* Take handler_mutex only if the id is still in the expected state. */
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
		cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
		cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
		} else {
			/* User-managed QP: let them complete the handshake. */
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		}
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
		break;
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
			goto out;
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
		break;
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

/*
 * Build a child cm_id for an incoming connected-mode (REQ) request:
 * inherit handler/context/ps from the listener, save addressing and the
 * path record(s), and resolve the source address.  Returns NULL on any
 * failure (allocation, address save, or translation).
 */
static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info(id, listen_id, ib_event))
		goto err;

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto err;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	if (cma_any_addr(cma_src_addr(id_priv))) {
		/* Wildcard listener: take identity from the path record. */
		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	} else {
		ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
		if (ret)
			goto err;
	}
	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;

err:
	rdma_destroy_id(id);
	return NULL;
}

/*
 * Build a child cm_id for an incoming SIDR (UD) request.  Same contract
 * as cma_new_conn_id() but without path records.
 */
static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);
	if (IS_ERR(id))
		return NULL;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info(id, listen_id, ib_event))
		goto err;

	if (!cma_any_addr((struct sockaddr *) &id->route.addr.src_addr)) {
		ret = cma_translate_addr(cma_src_addr(id_priv), &id->route.addr.dev_addr);
		if (ret)
			goto err;
	}

	id_priv->state = RDMA_CM_CONNECT;
	return id_priv;
err:
	rdma_destroy_id(id);
	return NULL;
}

/*
 * Copy REQ parameters into the rdma_cm event; private data is offset
 * past the CMA header (if any) so the user sees only their payload.
 */
static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
{
	event->param.conn.private_data = private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;
}

/* Does the incoming request's QP type match what the listener expects? */
static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||
		(!id->qp_type));
}

/*
 * IB CM callback on a listening id: build a child id for the incoming
 * REQ/SIDR request, acquire a device for it, and hand the CONNECT_REQUEST
 * event to the user.  Holds the listener's handler_mutex throughout and
 * the child's nested beneath it.
 */
static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;
	u8 smac[ETH_ALEN];
	u8 alt_smac[ETH_ALEN];
	u8 *psmac = smac;
	u8 *palt_smac = alt_smac;
	int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
			RDMA_TRANSPORT_IB) &&
		       (rdma_port_get_link_layer(cm_id->device,
			ib_event->param.req_rcvd.port) ==
			IB_LINK_LAYER_ETHERNET));

	listen_id = cm_id->context;
	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
		return -EINVAL;

	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
		event.param.ud.private_data = ib_event->private_data + offset;
		event.param.ud.private_data_len =
				IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
	} else {
		conn_id = cma_new_conn_id(&listen_id->id, ib_event);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);
	}
	if (!conn_id) {
		ret = -ENOMEM;
		goto err1;
	}

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret)
		goto err2;

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;

	if (is_iboe) {
		/* RoCE: resolve source MACs for the primary/alternate paths. */
		if (ib_event->param.req_rcvd.primary_path != NULL)
			rdma_addr_find_smac_by_sgid(
				&ib_event->param.req_rcvd.primary_path->sgid,
				psmac, NULL);
		else
			psmac = NULL;
		if (ib_event->param.req_rcvd.alternate_path != NULL)
			rdma_addr_find_smac_by_sgid(
				&ib_event->param.req_rcvd.alternate_path->sgid,
				palt_smac, NULL);
		else
			palt_smac = NULL;
	}
	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
	if (is_iboe)
		ib_update_cm_av(cm_id, psmac, palt_smac);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);
	return 0;

err3:
	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;
err2:
	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);
err1:
	mutex_unlock(&listen_id->handler_mutex);
	if (conn_id)
		rdma_destroy_id(&conn_id->id);
	return ret;
}

/*
 * Map a cm_id + address to its 64-bit IB service id.  AF_IB carries the
 * service id explicitly; IP families encode port space and port.
 */
__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
{
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
}
EXPORT_SYMBOL(rdma_get_service_id);

/*
 * Build the private-data compare data/mask so the IB CM only delivers
 * REQs whose CMA header destination matches @addr (wildcard listens
 * match any address and leave the address bytes unmasked).
 */
static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	__be32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		cma_set_ip_ver(cma_data, 4);
		cma_set_ip_ver(cma_mask, 0xF);
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = htonl(~0);
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		cma_set_ip_ver(cma_data, 6);
		cma_set_ip_ver(cma_mask, 0xF);
		if (!cma_any_addr(addr)) {
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}

/*
 * iWARP CM callback for a connected id: translate iWARP events into
 * rdma_cm events for the user's handler.
 */
static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	struct rdma_cm_event event;
	int ret = 0;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event.event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		memcpy(cma_src_addr(id_priv), laddr,
		       rdma_addr_size(laddr));
		memcpy(cma_dst_addr(id_priv), raddr,
		       rdma_addr_size(raddr));
		switch (iw_event->status) {
		case 0:
			event.event = RDMA_CM_EVENT_ESTABLISHED;
			event.param.conn.initiator_depth = iw_event->ird;
			event.param.conn.responder_resources = iw_event->ord;
			break;
		case -ECONNRESET:
		case -ECONNREFUSED:
			event.event = RDMA_CM_EVENT_REJECTED;
			break;
		case -ETIMEDOUT:
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			break;
		default:
			event.event = RDMA_CM_EVENT_CONNECT_ERROR;
			break;
		}
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.param.conn.initiator_depth = iw_event->ird;
		event.param.conn.responder_resources = iw_event->ord;
		break;
	default:
		BUG_ON(1);
	}

	event.status = iw_event->status;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

/*
 * iWARP CM callback on a listening id: create and wire up a child id for
 * the incoming request, then deliver CONNECT_REQUEST to the user.
 */
static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int ret;
	struct ib_device_attr attr;
	struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
	struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;

	listen_id = cm_id->context;
	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
		return -ECONNABORTED;

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(new_cm_id)) {
		ret = -ENOMEM;
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	conn_id->state = RDMA_CM_CONNECT;

	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	ret = cma_acquire_dev(conn_id, listen_id);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
	memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));

	ret = ib_query_device(conn_id->id.device, &attr);
	if (ret) {
		mutex_unlock(&conn_id->handler_mutex);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	event.param.conn.private_data = iw_event->private_data;
	event.param.conn.private_data_len = iw_event->private_data_len;
	event.param.conn.initiator_depth = iw_event->ird;
	event.param.conn.responder_resources = iw_event->ord;

	/*
	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.
	 */
	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, RDMA_CM_DESTROYING);
		mutex_unlock(&conn_id->handler_mutex);
		cma_deref_id(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	mutex_unlock(&conn_id->handler_mutex);
	cma_deref_id(conn_id);

out:
	mutex_unlock(&listen_id->handler_mutex);
	return ret;
}

/*
 * Start an IB CM listen for this id.  Wildcard (non-afonly) listens use
 * no compare data; bound listens match the destination address carried
 * in the request's CMA header.
 */
static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	struct ib_cm_id	*id;
	__be64 svc_id;
	int ret;

	id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.ib = id;

	addr = cma_src_addr(id_priv);
	svc_id = rdma_get_service_id(&id_priv->id, addr);
	if (cma_any_addr(addr) && !id_priv->afonly)
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

/* Start an iWARP CM listen on the id's source address. */
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct iw_cm_id	*id;

	id = iw_create_cm_id(id_priv->id.device,
			     iw_conn_req_handler,
			     id_priv);
	if (IS_ERR(id))
		return PTR_ERR(id);

	id_priv->cm_id.iw = id;

	memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

/*
 * Event handler for internal per-device listen ids: forward the event to
 * the parent (wildcard) listener's handler with its context restored.
 */
static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct
rdma_id_private *id_priv, struct cma_device *cma_dev) { struct rdma_id_private *dev_id_priv; struct rdma_cm_id *id; int ret; if (cma_family(id_priv) == AF_IB && rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB) return; id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps, id_priv->id.qp_type); if (IS_ERR(id)) return; dev_id_priv = container_of(id, struct rdma_id_private, id); dev_id_priv->state = RDMA_CM_ADDR_BOUND; memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), rdma_addr_size(cma_src_addr(id_priv))); cma_attach_to_dev(dev_id_priv, cma_dev); list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); atomic_inc(&id_priv->refcount); dev_id_priv->internal_id = 1; dev_id_priv->afonly = id_priv->afonly; ret = rdma_listen(id, id_priv->backlog); if (ret) printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, " "listening on device %s\n", ret, cma_dev->device->name); } static void cma_listen_on_all(struct rdma_id_private *id_priv) { struct cma_device *cma_dev; mutex_lock(&lock); list_add_tail(&id_priv->list, &listen_any_list); list_for_each_entry(cma_dev, &dev_list, list) cma_listen_on_dev(id_priv, cma_dev); mutex_unlock(&lock); } void rdma_set_service_type(struct rdma_cm_id *id, int tos) { struct rdma_id_private *id_priv; id_priv = container_of(id, struct rdma_id_private, id); id_priv->tos = (u8) tos; } EXPORT_SYMBOL(rdma_set_service_type); static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, void *context) { struct cma_work *work = context; struct rdma_route *route; route = &work->id->id.route; if (!status) { route->num_paths = 1; *route->path_rec = *path_rec; } else { work->old_state = RDMA_CM_ROUTE_QUERY; work->new_state = RDMA_CM_ADDR_RESOLVED; work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; work->event.status = status; } queue_work(cma_wq, &work->work); } static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, struct cma_work *work) { struct rdma_dev_addr *dev_addr = 
&id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;
	ib_sa_comp_mask comp_mask;
	struct sockaddr_in6 *sin6;
	struct sockaddr_ib *sib;

	memset(&path_rec, 0, sizeof path_rec);
	rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
	rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	path_rec.numb_path = 1;
	path_rec.reversible = 1;
	path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));

	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

	/* QoS class / traffic class hint depends on the address family. */
	switch (cma_family(id_priv)) {
	case AF_INET:
		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
		break;
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
		path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
		comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
		break;
	}

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
					       id_priv->id.port_num, &path_rec,
					       comp_mask, timeout_ms,
					       GFP_KERNEL, cma_query_handler,
					       work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

/*
 * Deferred-work handler: perform the queued state transition and deliver
 * the queued event; destroy the id if the user's handler requests it.
 */
static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

/*
 * Like cma_work_handler() but for netdev events: no state transition is
 * needed, only that the id is not already being torn down.
 */
static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	if (id_priv->state == RDMA_CM_DESTROYING ||
	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

/*
 * Start an asynchronous IB route resolution; completion is delivered as
 * ROUTE_RESOLVED/ROUTE_ERROR through cma_work_handler().
 */
static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

/*
 * Install caller-supplied path records on the id, moving it from
 * ADDR_RESOLVED to ROUTE_RESOLVED without querying the SA.
 */
int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
			   RDMA_CM_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
				     GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	id->route.num_paths = num_paths;
	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

/*
 * iWarp needs no route query: immediately queue the ROUTE_RESOLVED
 * transition through the work queue.
 */
static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}

/*
 * Map an IP TOS value to an Ethernet service level for RoCE, honoring
 * netdev traffic-class and VLAN egress priority mappings if configured.
 */
static int iboe_tos_to_sl(struct net_device *ndev, int tos)
{
	int prio;
	struct net_device *dev;

	prio = rt_tos2priority(tos);
	dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
vlan_dev_real_dev(ndev) : ndev;

	if (dev->num_tc)
		return netdev_get_prio_tc_map(dev, prio);

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	if (ndev->priv_flags & IFF_802_1Q_VLAN)
		return (vlan_dev_get_egress_qos_mask(ndev, prio) &
			VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#endif
	return 0;
}

/*
 * Build the RoCE (IBoE) route locally from the already-resolved L2
 * address: fill a single path record from the bound netdev and queue the
 * ROUTE_RESOLVED transition.
 */
static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
{
	struct rdma_route *route = &id_priv->id.route;
	struct rdma_addr *addr = &route->addr;
	struct cma_work *work;
	int ret;
	struct net_device *ndev = NULL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);

	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	route->num_paths = 1;

	if (addr->dev_addr.bound_dev_if)
		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
	if (!ndev) {
		ret = -ENODEV;
		goto err2;
	}

	route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
	memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &route->path_rec->sgid);
	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
		    &route->path_rec->dgid);

	route->path_rec->hop_limit = 1;
	route->path_rec->reversible = 1;
	route->path_rec->pkey = cpu_to_be16(0xffff);
	route->path_rec->mtu_selector = IB_SA_EQ;
	route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
	route->path_rec->rate_selector = IB_SA_EQ;
	route->path_rec->rate = iboe_get_rate(ndev);
	dev_put(ndev);
	route->path_rec->packet_life_time_selector = IB_SA_EQ;
	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
	if (!route->path_rec->mtu) {
		ret = -EINVAL;
		goto err2;
	}

	work->old_state = RDMA_CM_ROUTE_QUERY;
	work->new_state = RDMA_CM_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	work->event.status = 0;

	queue_work(cma_wq, &work->work);

	return 0;

err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

/*
 * Resolve routing information for an address-resolved id, dispatching on
 * transport and link layer; the id moves to ROUTE_QUERY and completion
 * is delivered asynchronously (reverting to ADDR_RESOLVED on failure).
 */
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ret = cma_resolve_ib_route(id_priv, timeout_ms);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ret = cma_resolve_iboe_route(id_priv);
			break;
		default:
			ret = -ENOSYS;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_resolve_iw_route(id_priv, timeout_ms);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_route);

/* Set the family-appropriate loopback address on @addr. */
static void cma_set_loopback(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
		break;
	case AF_INET6:
		ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
			      0, 0, 0, htonl(1));
		break;
	default:
		ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
			    0, 0, 0, htonl(1));
		break;
	}
}

/*
 * Bind an unbound id to a local device for loopback use: prefer a
 * device with an ACTIVE port, else fall back to port 1 of the first
 * suitable device.
 */
static int cma_bind_loopback(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev, *cur_dev;
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;
	u8 p;

	cma_dev = NULL;
	mutex_lock(&lock);
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (cma_family(id_priv) == AF_IB &&
		    rdma_node_get_transport(cur_dev->device->node_type) != RDMA_TRANSPORT_IB)
			continue;

		if (!cma_dev)
			cma_dev = cur_dev;

		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_query_port(cur_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE) {
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}

	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}

	p = 1;

port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;

	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;

	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;

	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	cma_set_loopback(cma_src_addr(id_priv));
out:
	mutex_unlock(&lock);
	return ret;
}

/*
 * Completion callback for rdma_resolve_ip(): record the chosen source
 * address, bind to a device if needed, and deliver ADDR_RESOLVED or
 * ADDR_ERROR to the user.
 */
static void addr_handler(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *dev_addr, void *context)
{
	struct rdma_id_private *id_priv = context;
	struct rdma_cm_event event;

	memset(&event, 0, sizeof event);
	mutex_lock(&id_priv->handler_mutex);
	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
			   RDMA_CM_ADDR_RESOLVED))
		goto out;

	memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
	if (!status && !id_priv->cma_dev)
		status = cma_acquire_dev(id_priv, NULL);

	if (status) {
		/* On error the id falls back to ADDR_BOUND. */
		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
				   RDMA_CM_ADDR_BOUND))
			goto out;
		event.event = RDMA_CM_EVENT_ADDR_ERROR;
		event.status = status;
	} else
		event.event = RDMA_CM_EVENT_ADDR_RESOLVED;

	if (id_priv->id.event_handler(&id_priv->id, &event)) {
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		cma_deref_id(id_priv);
		rdma_destroy_id(&id_priv->id);
		return;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	cma_deref_id(id_priv);
}

/*
 * Resolve a loopback destination: bind to a local device if needed,
 * mirror the source GID as destination GID, and queue ADDR_RESOLVED.
 */
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	union ib_gid gid;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_bind_loopback(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

/*
 * Resolve an AF_IB destination: the destination GID is carried directly
 * in the sockaddr_ib, so just bind a device and queue ADDR_RESOLVED.
 */
static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
{
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (!id_priv->cma_dev) {
		ret = cma_resolve_ib_dev(id_priv);
		if (ret)
			goto err;
	}

	rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
		&(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	work->old_state = RDMA_CM_ADDR_QUERY;
	work->new_state = RDMA_CM_ADDR_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
err:
	kfree(work);
	return ret;
}

/*
 * If no source address was supplied, synthesize a wildcard source of the
 * destination's family (propagating IPv6 scope id / AF_IB pkey), then
 * bind the id to it.
 */
static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
			 struct sockaddr *dst_addr)
{
	if (!src_addr || !src_addr->sa_family) {
		src_addr = (struct sockaddr *) &id->route.addr.src_addr;
		src_addr->sa_family = dst_addr->sa_family;
		if (dst_addr->sa_family == AF_INET6) {
			((struct sockaddr_in6 *) src_addr)->sin6_scope_id =
				((struct sockaddr_in6 *) dst_addr)->sin6_scope_id;
		} else if (dst_addr->sa_family == AF_IB) {
			((struct sockaddr_ib *) src_addr)->sib_pkey =
				((struct sockaddr_ib *) dst_addr)->sib_pkey;
		}
	}
	return rdma_bind_addr(id, src_addr);
}

/*
 * Resolve destination (and, if necessary, source) addresses, moving the
 * id from ADDR_BOUND to ADDR_QUERY; completion arrives via addr_handler()
 * or the loopback/AF_IB work handlers.  Source and destination families
 * must match.
 */
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
		      struct sockaddr *dst_addr, int timeout_ms)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		ret = cma_bind_addr(id, src_addr, dst_addr);
		if (ret)
			return ret;
	}

	if (cma_family(id_priv) != dst_addr->sa_family)
		return -EINVAL;

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
		return -EINVAL;

	atomic_inc(&id_priv->refcount);
	memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
	if (cma_any_addr(dst_addr)) {
		ret = cma_resolve_loopback(id_priv);
	} else {
		if (dst_addr->sa_family == AF_IB) {
			ret = cma_resolve_ib_addr(id_priv);
		} else {
			ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
					      dst_addr, &id->route.addr.dev_addr,
					      timeout_ms, addr_handler, id_priv);
		}
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
	cma_deref_id(id_priv);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);

/*
 * Allow or disallow binding to an address/port already in use; may only
 * be cleared while the id is still IDLE.
 */
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (reuse || id_priv->state == RDMA_CM_IDLE) {
		id_priv->reuseaddr = reuse;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_reuseaddr);

/*
 * Restrict port sharing to the id's own address family; only valid
 * before the id is listening (IDLE or ADDR_BOUND).
 */
int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
{
	struct rdma_id_private *id_priv;
	unsigned long flags;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	spin_lock_irqsave(&id_priv->lock, flags);
	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
		id_priv->options |= (1 << CMA_OPTION_AFONLY);
		id_priv->afonly = afonly;
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(rdma_set_afonly);

/*
 * Record the port chosen from @bind_list in the id's source address
 * (for AF_IB the port is folded into the service ID) and add the id to
 * the bind list's owners.
 */
static void cma_bind_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv)
{
	struct sockaddr *addr;
	struct sockaddr_ib *sib;
	u64 sid, mask;
	__be16 port;

	addr = cma_src_addr(id_priv);
	port = htons(bind_list->port);

	switch (addr->sa_family) {
	case AF_INET:
		((struct sockaddr_in *) addr)->sin_port = port;
		break;
	case AF_INET6:
		((struct sockaddr_in6 *) addr)->sin6_port = port;
		break;
	case AF_IB:
		sib = (struct sockaddr_ib *) addr;
		sid = be64_to_cpu(sib->sib_sid);
		mask = be64_to_cpu(sib->sib_sid_mask);
		sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
		sib->sib_sid_mask = cpu_to_be64(~0ULL);
		break;
	}
	id_priv->bind_list = bind_list;
	hlist_add_head(&id_priv->node, &bind_list->owners);
}

/*
 * Allocate a bind list for the specific port @snum in port space @ps and
 * bind @id_priv to it; -ENOSPC from the idr means the port is taken.
 */
static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
			  unsigned short snum)
{
	struct rdma_bind_list *bind_list;
	int ret;

	bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
	if (!bind_list)
		return -ENOMEM;

	ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
	if (ret < 0)
		goto err;

	bind_list->ps = ps;
	bind_list->port = (unsigned short)ret;
	cma_bind_port(bind_list, id_priv);
	return 0;
err:
	kfree(bind_list);
	return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}

/*
 * Pick an ephemeral port from the local port range, starting at a
 * random rover and avoiding immediate reuse of the last closed port.
 */
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	static unsigned int last_used_port;
	int low, high, remaining;
	unsigned int rover;

	inet_get_local_port_range(&init_net, &low, &high);
	remaining = (high - low) + 1;
	rover = prandom_u32() % remaining + low;
retry:
	if (last_used_port != rover &&
	    !idr_find(ps, (unsigned short) rover)) {
		int ret = cma_alloc_port(ps, id_priv, rover);
		/*
		 * Remember previously used port number in order to avoid
		 * re-using same port immediately after it is closed.
		 */
		if (!ret)
			last_used_port = rover;
		if (ret != -EADDRNOTAVAIL)
			return ret;
	}
	if (--remaining) {
		rover++;
		if ((rover < low) || (rover > high))
			rover = low;
		goto retry;
	}
	return -EADDRNOTAVAIL;
}

/*
 * Check that the requested port is available. This is called when trying to
 * bind to a specific port, or when trying to listen on a bound port. In
 * the latter case, the provided id_priv may already be on the bind_list, but
 * we still need to check that it's okay to start listening.
*/
static int cma_check_port(struct rdma_bind_list *bind_list,
			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
{
	struct rdma_id_private *cur_id;
	struct sockaddr *addr, *cur_addr;

	addr = cma_src_addr(id_priv);
	hlist_for_each_entry(cur_id, &bind_list->owners, node) {
		if (id_priv == cur_id)
			continue;

		/* Non-listening owners may share the port when both consent. */
		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
		    cur_id->reuseaddr)
			continue;

		cur_addr = cma_src_addr(cur_id);
		if (id_priv->afonly && cur_id->afonly &&
		    (addr->sa_family != cur_addr->sa_family))
			continue;

		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
			return -EADDRNOTAVAIL;

		if (!cma_addr_cmp(addr, cur_addr))
			return -EADDRINUSE;
	}
	return 0;
}

/*
 * Bind to the caller-requested port, creating the bind list on first
 * use; privileged ports require CAP_NET_BIND_SERVICE.
 */
static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list;
	unsigned short snum;
	int ret;

	snum = ntohs(cma_port(cma_src_addr(id_priv)));
	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
		return -EACCES;

	bind_list = idr_find(ps, snum);
	if (!bind_list) {
		ret = cma_alloc_port(ps, id_priv, snum);
	} else {
		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
		if (!ret)
			cma_bind_port(bind_list, id_priv);
	}
	return ret;
}

/*
 * Before listening on a shared (reuseaddr) binding, re-validate the
 * port against the other owners without reuse privileges.
 */
static int cma_bind_listen(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	int ret = 0;

	mutex_lock(&lock);
	if (bind_list->owners.first->next)
		ret = cma_check_port(bind_list, id_priv, 0);
	mutex_unlock(&lock);
	return ret;
}

/* Map an IP-based RDMA port space to its idr. */
static struct idr *cma_select_inet_ps(struct rdma_id_private *id_priv)
{
	switch (id_priv->id.ps) {
	case RDMA_PS_TCP:
		return &tcp_ps;
	case RDMA_PS_UDP:
		return &udp_ps;
	case RDMA_PS_IPOIB:
		return &ipoib_ps;
	case RDMA_PS_IB:
		return &ib_ps;
	default:
		return NULL;
	}
}

/*
 * Derive the port space for an AF_IB binding from the service ID and
 * its mask, normalizing sid/mask to the chosen space on success.
 */
static struct idr *cma_select_ib_ps(struct rdma_id_private *id_priv)
{
	struct idr *ps = NULL;
	struct sockaddr_ib *sib;
	u64 sid_ps, mask, sid;

	sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
	mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
	sid = be64_to_cpu(sib->sib_sid) & mask;

	if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
		sid_ps = RDMA_IB_IP_PS_IB;
		ps = &ib_ps;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
		   (sid == (RDMA_IB_IP_PS_TCP & mask))) {
		sid_ps = RDMA_IB_IP_PS_TCP;
		ps = &tcp_ps;
	} else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
		   (sid == (RDMA_IB_IP_PS_UDP & mask))) {
		sid_ps = RDMA_IB_IP_PS_UDP;
		ps = &udp_ps;
	}

	if (ps) {
		sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
		sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
						be64_to_cpu(sib->sib_sid_mask));
	}
	return ps;
}

/* Bind @id_priv to a port: any free port, or the one already requested. */
static int cma_get_port(struct rdma_id_private *id_priv)
{
	struct idr *ps;
	int ret;

	if (cma_family(id_priv) != AF_IB)
		ps = cma_select_inet_ps(id_priv);
	else
		ps = cma_select_ib_ps(id_priv);
	if (!ps)
		return -EPROTONOSUPPORT;

	mutex_lock(&lock);
	if (cma_any_port(cma_src_addr(id_priv)))
		ret = cma_alloc_any_port(ps, id_priv);
	else
		ret = cma_use_port(ps, id_priv);
	mutex_unlock(&lock);

	return ret;
}

/*
 * IPv6 link-local addresses must carry a scope id, which also pins the
 * binding to that interface.
 */
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
			       struct sockaddr *addr)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 *sin6;

	if (addr->sa_family != AF_INET6)
		return 0;

	sin6 = (struct sockaddr_in6 *) addr;
	if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
		return 0;

	if (!sin6->sin6_scope_id)
		return -EINVAL;

	dev_addr->bound_dev_if = sin6->sin6_scope_id;
#endif
	return 0;
}

/*
 * Start listening for connection requests, binding a still-idle id to an
 * IPv4 wildcard first.  A device-bound id listens on its own transport;
 * an unbound (wildcard) id listens on every device.
 */
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == RDMA_CM_IDLE) {
		id->route.addr.src_addr.ss_family = AF_INET;
		ret = rdma_bind_addr(id, cma_src_addr(id_priv));
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
		return -EINVAL;

	if (id_priv->reuseaddr) {
		ret = cma_bind_listen(id_priv);
		if (ret)
			goto err;
	}

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);

/*
 * Associate the id with a local address (and device, unless wildcard),
 * moving it from IDLE to ADDR_BOUND and claiming a port.
 */
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct rdma_id_private *id_priv;
	int ret;

	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
	    addr->sa_family != AF_IB)
		return -EAFNOSUPPORT;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;

	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
	if (ret)
		goto err1;

	memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
	if (!cma_any_addr(addr)) {
		ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
		if (ret)
			goto err1;

		ret = cma_acquire_dev(id_priv, NULL);
		if (ret)
			goto err1;
	}

	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
		if (addr->sa_family == AF_INET)
			id_priv->afonly = 1;
#if IS_ENABLED(CONFIG_IPV6)
		else if (addr->sa_family == AF_INET6)
			id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
#endif
	}
	ret = cma_get_port(id_priv);
	if (ret)
		goto err2;

	return 0;
err2:
	if (id_priv->cma_dev)
		cma_release_dev(id_priv);
err1:
	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
	return ret;
}
EXPORT_SYMBOL(rdma_bind_addr);

/*
 * Fill the CMA private-data header with the IP version, addresses and
 * port taken from the id's bound source/destination.
 */
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
{
	struct cma_hdr *cma_hdr;

	cma_hdr = hdr;
	cma_hdr->cma_version = CMA_VERSION;
	if (cma_family(id_priv) == AF_INET) {
		struct sockaddr_in *src4, *dst4;

		src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
		dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 4);
		cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
		cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
		cma_hdr->port = src4->sin_port;
	} else if (cma_family(id_priv) == AF_INET6)
{
		struct sockaddr_in6 *src6, *dst6;

		src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
		dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);

		cma_set_ip_ver(cma_hdr, 6);
		cma_hdr->src_addr.ip6 = src6->sin6_addr;
		cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
		cma_hdr->port = src6->sin6_port;
	}
	return 0;
}

/*
 * IB CM callback for SIDR (UD) resolution on the active side: translate
 * the CM event into an RDMA CM event and deliver it to the user.
 */
static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
				struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;
	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
	int ret = 0;

	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
		return 0;

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_SIDR_REQ_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		event.param.ud.private_data = ib_event->private_data;
		event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
		if (rep->status != IB_SIDR_SUCCESS) {
			event.event = RDMA_CM_EVENT_UNREACHABLE;
			event.status = ib_event->param.sidr_rep_rcvd.status;
			break;
		}
		ret = cma_set_qkey(id_priv, rep->qkey);
		if (ret) {
			event.event = RDMA_CM_EVENT_ADDR_ERROR;
			event.status = ret;
			break;
		}
		ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
				     id_priv->id.route.path_rec,
				     &event.param.ud.ah_attr);
		event.param.ud.qp_num = rep->qpn;
		event.param.ud.qkey = rep->qkey;
		event.event = RDMA_CM_EVENT_ESTABLISHED;
		event.status = 0;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n", ib_event->event);
		goto out;
	}

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value.
		 */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	mutex_unlock(&id_priv->handler_mutex);
	return ret;
}

/*
 * Active-side UD (SIDR) connect: build the CMA header plus the user's
 * private data and send a SIDR REQ through a fresh IB CM id.
 * NOTE(review): GFP_ATOMIC is used for the private-data buffer —
 * presumably to allow calling from atomic context; confirm.
 */
static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
			      struct rdma_conn_param *conn_param)
{
	struct ib_cm_sidr_req_param req;
	struct ib_cm_id *id;
	void *private_data;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	/* Guard against integer overflow of offset + len. */
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	/* User data follows the CMA header at @offset (void-ptr arithmetic). */
	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
			     id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	req.path = id_priv->id.route.path_rec;
	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
	req.max_cm_retries = CMA_MAX_CM_RETRIES;

	ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}
out:
	kfree(private_data);
	return ret;
}

/*
 * Active-side RC connect: build the REQ (CMA header + private data,
 * path records, QP and timeout parameters) and send it through a fresh
 * IB CM id.
 */
static int cma_connect_ib(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct ib_cm_req_param req;
	struct rdma_route *route;
	void *private_data;
	struct ib_cm_id *id;
	int offset, ret;

	memset(&req, 0, sizeof req);
	offset = cma_user_data_offset(id_priv);
	req.private_data_len = offset + conn_param->private_data_len;
	/* Guard against integer overflow of offset + len. */
	if (req.private_data_len < conn_param->private_data_len)
		return -EINVAL;

	if (req.private_data_len) {
		private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
		if (!private_data)
			return -ENOMEM;
	} else {
		private_data = NULL;
	}

	if (conn_param->private_data && conn_param->private_data_len)
		memcpy(private_data + offset, conn_param->private_data,
		       conn_param->private_data_len);

	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
	if (IS_ERR(id)) {
		ret = PTR_ERR(id);
		goto out;
	}
	id_priv->cm_id.ib = id;

	route = &id_priv->id.route;
	if (private_data) {
		ret = cma_format_hdr(private_data, id_priv);
		if (ret)
			goto out;
		req.private_data = private_data;
	}

	req.primary_path = &route->path_rec[0];
	if (route->num_paths == 2)
		req.alternate_path = &route->path_rec[1];

	req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
	req.qp_num = id_priv->qp_num;
	req.qp_type = id_priv->id.qp_type;
	req.starting_psn = id_priv->seq_num;
	req.responder_resources = conn_param->responder_resources;
	req.initiator_depth = conn_param->initiator_depth;
	req.flow_control = conn_param->flow_control;
	req.retry_count = min_t(u8, 7, conn_param->retry_count);
	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
	req.max_cm_retries = CMA_MAX_CM_RETRIES;
	req.srq = id_priv->srq ?
1 : 0;

	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
	if (ret && !IS_ERR(id)) {
		ib_destroy_cm_id(id);
		id_priv->cm_id.ib = NULL;
	}

	kfree(private_data);
	return ret;
}

/*
 * Active-side iWarp connect: seed the CM id with local/remote addresses,
 * move the QP to RTR and issue the iWarp connect with the user's
 * ord/ird and private data.
 */
static int cma_connect_iw(struct rdma_id_private *id_priv,
			  struct rdma_conn_param *conn_param)
{
	struct iw_cm_id *cm_id;
	int ret;
	struct iw_cm_conn_param iw_param;

	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	id_priv->cm_id.iw = cm_id;

	memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
	       rdma_addr_size(cma_src_addr(id_priv)));
	memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
	       rdma_addr_size(cma_dst_addr(id_priv)));

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	if (conn_param) {
		iw_param.ord = conn_param->initiator_depth;
		iw_param.ird = conn_param->responder_resources;
		iw_param.private_data = conn_param->private_data;
		iw_param.private_data_len = conn_param->private_data_len;
		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
	} else {
		memset(&iw_param, 0, sizeof iw_param);
		iw_param.qpn = id_priv->qp_num;
	}
	ret = iw_cm_connect(cm_id, &iw_param);
out:
	if (ret) {
		iw_destroy_cm_id(cm_id);
		id_priv->cm_id.iw = NULL;
	}
	return ret;
}

/*
 * Initiate a connection on a route-resolved id, dispatching on transport
 * and QP type; on failure the id returns to ROUTE_RESOLVED.
 */
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_resolve_ib_udp(id_priv, conn_param);
		else
			ret = cma_connect_ib(id_priv, conn_param);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_connect_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	if (ret)
		goto err;

	return 0;
err:
	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_connect);

/*
 * Passive-side RC accept: transition the QP to RTR/RTS and send the REP
 * built from the user's connection parameters.
 */
static int cma_accept_ib(struct rdma_id_private *id_priv,
			 struct rdma_conn_param *conn_param)
{
	struct ib_cm_rep_param rep;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		goto out;

	ret = cma_modify_qp_rts(id_priv, conn_param);
	if (ret)
		goto out;

	memset(&rep, 0, sizeof rep);
	rep.qp_num = id_priv->qp_num;
	rep.starting_psn = id_priv->seq_num;
	rep.private_data = conn_param->private_data;
	rep.private_data_len = conn_param->private_data_len;
	rep.responder_resources = conn_param->responder_resources;
	rep.initiator_depth = conn_param->initiator_depth;
	rep.failover_accepted = 0;
	rep.flow_control = conn_param->flow_control;
	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
	rep.srq = id_priv->srq ? 1 : 0;

	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
out:
	return ret;
}

/*
 * Passive-side iWarp accept: move the QP to RTR and accept with the
 * user's ord/ird and private data.
 */
static int cma_accept_iw(struct rdma_id_private *id_priv,
		  struct rdma_conn_param *conn_param)
{
	struct iw_cm_conn_param iw_param;
	int ret;

	ret = cma_modify_qp_rtr(id_priv, conn_param);
	if (ret)
		return ret;

	iw_param.ord = conn_param->initiator_depth;
	iw_param.ird = conn_param->responder_resources;
	iw_param.private_data = conn_param->private_data;
	iw_param.private_data_len = conn_param->private_data_len;
	if (id_priv->id.qp) {
		iw_param.qpn = id_priv->qp_num;
	} else
		iw_param.qpn = conn_param->qp_num;

	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
}

/*
 * Send a SIDR REP; on success also establish the local qkey before
 * reporting the QP number and qkey back to the requester.
 */
static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
			     enum ib_cm_sidr_status status, u32 qkey,
			     const void *private_data, int private_data_len)
{
	struct ib_cm_sidr_rep_param rep;
	int ret;

	memset(&rep, 0, sizeof rep);
	rep.status = status;
	if (status == IB_SIDR_SUCCESS) {
		ret = cma_set_qkey(id_priv, qkey);
		if (ret)
			return ret;
		rep.qp_num = id_priv->qp_num;
		rep.qkey = id_priv->qkey;
	}
	rep.private_data = private_data;
	rep.private_data_len = private_data_len;

	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
}

/*
 * Accept an incoming connection request on an id in CONNECT state,
 * dispatching on transport and QP type; on failure the connection is
 * rejected and the QP moved to the error state.
 */
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);

	id_priv->owner = task_pid_nr(current);

	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
		return -EINVAL;

	if (!id->qp && conn_param) {
		id_priv->qp_num = conn_param->qp_num;
		id_priv->srq = conn_param->srq;
	}

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD) {
			if (conn_param)
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							conn_param->qkey,
							conn_param->private_data,
							conn_param->private_data_len);
			else
				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
							0, NULL, 0);
		} else {
			if (conn_param)
				ret = cma_accept_ib(id_priv, conn_param);
			else
				ret = cma_rep_recv(id_priv);
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_accept_iw(id_priv, conn_param);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(id_priv);
	rdma_reject(id, NULL, 0);
	return ret;
}
EXPORT_SYMBOL(rdma_accept);

/* Forward an IB event (e.g. path migration) to the IB CM. */
int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (id->device->node_type) {
	case RDMA_NODE_IB_CA:
		ret = ib_cm_notify(id_priv->cm_id.ib, event);
		break;
	default:
		ret = 0;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(rdma_notify);

/*
 * Reject an incoming connection request, carrying optional private data
 * back to the initiator (SIDR REJ for UD, CM REJ otherwise).
 */
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
		u8 private_data_len)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!id_priv->cm_id.ib)
		return -EINVAL;

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
						private_data, private_data_len);
		else
			ret = ib_send_cm_rej(id_priv->cm_id.ib,
					     IB_CM_REJ_CONSUMER_DEFINED, NULL,
					     0, private_data, private_data_len);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_reject(id_priv->cm_id.iw,
				   private_data, private_data_len);
		break;
default: ret = -ENOSYS; break; } return ret; } EXPORT_SYMBOL(rdma_reject); int rdma_disconnect(struct rdma_cm_id *id) { struct rdma_id_private *id_priv; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!id_priv->cm_id.ib) return -EINVAL; switch (rdma_node_get_transport(id->device->node_type)) { case RDMA_TRANSPORT_IB: ret = cma_modify_qp_err(id_priv); if (ret) goto out; /* Initiate or respond to a disconnect. */ if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); break; case RDMA_TRANSPORT_IWARP: ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); break; default: ret = -EINVAL; break; } out: return ret; } EXPORT_SYMBOL(rdma_disconnect); static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) { struct rdma_id_private *id_priv; struct cma_multicast *mc = multicast->context; struct rdma_cm_event event; int ret; id_priv = mc->id_priv; if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) return 0; if (!status) status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); mutex_lock(&id_priv->qp_mutex); if (!status && id_priv->id.qp) status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, be16_to_cpu(multicast->rec.mlid)); mutex_unlock(&id_priv->qp_mutex); memset(&event, 0, sizeof event); event.status = status; event.param.ud.private_data = mc->context; if (!status) { event.event = RDMA_CM_EVENT_MULTICAST_JOIN; ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, &multicast->rec, &event.param.ud.ah_attr); event.param.ud.qp_num = 0xFFFFFF; event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey); } else event.event = RDMA_CM_EVENT_MULTICAST_ERROR; ret = id_priv->id.event_handler(&id_priv->id, &event); if (ret) { cma_exch(id_priv, RDMA_CM_DESTROYING); mutex_unlock(&id_priv->handler_mutex); rdma_destroy_id(&id_priv->id); return 0; } mutex_unlock(&id_priv->handler_mutex); return 0; } static void cma_set_mgid(struct 
rdma_id_private *id_priv, struct sockaddr *addr, union ib_gid *mgid) { unsigned char mc_map[MAX_ADDR_LEN]; struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; struct sockaddr_in *sin = (struct sockaddr_in *) addr; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; if (cma_any_addr(addr)) { memset(mgid, 0, sizeof *mgid); } else if ((addr->sa_family == AF_INET6) && ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == 0xFF10A01B)) { /* IPv6 address is an SA assigned MGID. */ memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); } else if (addr->sa_family == AF_IB) { memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); } else if ((addr->sa_family == AF_INET6)) { ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); if (id_priv->id.ps == RDMA_PS_UDP) mc_map[7] = 0x01; /* Use RDMA CM signature */ *mgid = *(union ib_gid *) (mc_map + 4); } else { ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); if (id_priv->id.ps == RDMA_PS_UDP) mc_map[7] = 0x01; /* Use RDMA CM signature */ *mgid = *(union ib_gid *) (mc_map + 4); } } static int cma_join_ib_multicast(struct rdma_id_private *id_priv, struct cma_multicast *mc) { struct ib_sa_mcmember_rec rec; struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; ib_sa_comp_mask comp_mask; int ret; ib_addr_get_mgid(dev_addr, &rec.mgid); ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, &rec.mgid, &rec); if (ret) return ret; ret = cma_set_qkey(id_priv, 0); if (ret) return ret; cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); rec.qkey = cpu_to_be32(id_priv->qkey); rdma_addr_get_sgid(dev_addr, &rec.port_gid); rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); rec.join_state = 1; comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL | IB_SA_MCMEMBER_REC_FLOW_LABEL | IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; if 
(id_priv->id.ps == RDMA_PS_IPOIB) comp_mask |= IB_SA_MCMEMBER_REC_RATE | IB_SA_MCMEMBER_REC_RATE_SELECTOR | IB_SA_MCMEMBER_REC_MTU_SELECTOR | IB_SA_MCMEMBER_REC_MTU | IB_SA_MCMEMBER_REC_HOP_LIMIT; mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, id_priv->id.port_num, &rec, comp_mask, GFP_KERNEL, cma_ib_mc_handler, mc); return PTR_ERR_OR_ZERO(mc->multicast.ib); } static void iboe_mcast_work_handler(struct work_struct *work) { struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work); struct cma_multicast *mc = mw->mc; struct ib_sa_multicast *m = mc->multicast.ib; mc->multicast.ib->context = mc; cma_ib_mc_handler(0, m); kref_put(&mc->mcref, release_mc); kfree(mw); } static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid) { struct sockaddr_in *sin = (struct sockaddr_in *)addr; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; if (cma_any_addr(addr)) { memset(mgid, 0, sizeof *mgid); } else if (addr->sa_family == AF_INET6) { memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); } else { mgid->raw[0] = 0xff; mgid->raw[1] = 0x0e; mgid->raw[2] = 0; mgid->raw[3] = 0; mgid->raw[4] = 0; mgid->raw[5] = 0; mgid->raw[6] = 0; mgid->raw[7] = 0; mgid->raw[8] = 0; mgid->raw[9] = 0; mgid->raw[10] = 0xff; mgid->raw[11] = 0xff; *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; } } static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, struct cma_multicast *mc) { struct iboe_mcast_work *work; struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; int err; struct sockaddr *addr = (struct sockaddr *)&mc->addr; struct net_device *ndev = NULL; if (cma_zero_addr((struct sockaddr *)&mc->addr)) return -EINVAL; work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL); if (!mc->multicast.ib) { err = -ENOMEM; goto out1; } cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid); mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff); if 
(id_priv->id.ps == RDMA_PS_UDP) mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY); if (dev_addr->bound_dev_if) ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); if (!ndev) { err = -ENODEV; goto out2; } mc->multicast.ib->rec.rate = iboe_get_rate(ndev); mc->multicast.ib->rec.hop_limit = 1; mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu); dev_put(ndev); if (!mc->multicast.ib->rec.mtu) { err = -EINVAL; goto out2; } rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, &mc->multicast.ib->rec.port_gid); work->id = id_priv; work->mc = mc; INIT_WORK(&work->work, iboe_mcast_work_handler); kref_get(&mc->mcref); queue_work(cma_wq, &work->work); return 0; out2: kfree(mc->multicast.ib); out1: kfree(work); return err; } int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, void *context) { struct rdma_id_private *id_priv; struct cma_multicast *mc; int ret; id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) return -EINVAL; mc = kmalloc(sizeof *mc, GFP_KERNEL); if (!mc) return -ENOMEM; memcpy(&mc->addr, addr, rdma_addr_size(addr)); mc->context = context; mc->id_priv = id_priv; spin_lock(&id_priv->lock); list_add(&mc->list, &id_priv->mc_list); spin_unlock(&id_priv->lock); switch (rdma_node_get_transport(id->device->node_type)) { case RDMA_TRANSPORT_IB: switch (rdma_port_get_link_layer(id->device, id->port_num)) { case IB_LINK_LAYER_INFINIBAND: ret = cma_join_ib_multicast(id_priv, mc); break; case IB_LINK_LAYER_ETHERNET: kref_init(&mc->mcref); ret = cma_iboe_join_multicast(id_priv, mc); break; default: ret = -EINVAL; } break; default: ret = -ENOSYS; break; } if (ret) { spin_lock_irq(&id_priv->lock); list_del(&mc->list); spin_unlock_irq(&id_priv->lock); kfree(mc); } return ret; } EXPORT_SYMBOL(rdma_join_multicast); void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) { struct rdma_id_private *id_priv; struct cma_multicast *mc; 
id_priv = container_of(id, struct rdma_id_private, id); spin_lock_irq(&id_priv->lock); list_for_each_entry(mc, &id_priv->mc_list, list) { if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) { list_del(&mc->list); spin_unlock_irq(&id_priv->lock); if (id->qp) ib_detach_mcast(id->qp, &mc->multicast.ib->rec.mgid, be16_to_cpu(mc->multicast.ib->rec.mlid)); if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { switch (rdma_port_get_link_layer(id->device, id->port_num)) { case IB_LINK_LAYER_INFINIBAND: ib_sa_free_multicast(mc->multicast.ib); kfree(mc); break; case IB_LINK_LAYER_ETHERNET: kref_put(&mc->mcref, release_mc); break; default: break; } } return; } } spin_unlock_irq(&id_priv->lock); } EXPORT_SYMBOL(rdma_leave_multicast); static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) { struct rdma_dev_addr *dev_addr; struct cma_ndev_work *work; dev_addr = &id_priv->id.route.addr.dev_addr; if ((dev_addr->bound_dev_if == ndev->ifindex) && memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", ndev->name, &id_priv->id); work = kzalloc(sizeof *work, GFP_KERNEL); if (!work) return -ENOMEM; INIT_WORK(&work->work, cma_ndev_work_handler); work->id = id_priv; work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; atomic_inc(&id_priv->refcount); queue_work(cma_wq, &work->work); } return 0; } static int cma_netdev_callback(struct notifier_block *self, unsigned long event, void *ptr) { struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct cma_device *cma_dev; struct rdma_id_private *id_priv; int ret = NOTIFY_DONE; if (dev_net(ndev) != &init_net) return NOTIFY_DONE; if (event != NETDEV_BONDING_FAILOVER) return NOTIFY_DONE; if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING)) return NOTIFY_DONE; mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) list_for_each_entry(id_priv, &cma_dev->id_list, list) { ret = 
cma_netdev_change(ndev, id_priv); if (ret) goto out; } out: mutex_unlock(&lock); return ret; } static struct notifier_block cma_nb = { .notifier_call = cma_netdev_callback }; static void cma_add_one(struct ib_device *device) { struct cma_device *cma_dev; struct rdma_id_private *id_priv; cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); if (!cma_dev) return; cma_dev->device = device; init_completion(&cma_dev->comp); atomic_set(&cma_dev->refcount, 1); INIT_LIST_HEAD(&cma_dev->id_list); ib_set_client_data(device, &cma_client, cma_dev); mutex_lock(&lock); list_add_tail(&cma_dev->list, &dev_list); list_for_each_entry(id_priv, &listen_any_list, list) cma_listen_on_dev(id_priv, cma_dev); mutex_unlock(&lock); } static int cma_remove_id_dev(struct rdma_id_private *id_priv) { struct rdma_cm_event event; enum rdma_cm_state state; int ret = 0; /* Record that we want to remove the device */ state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); if (state == RDMA_CM_DESTROYING) return 0; cma_cancel_operation(id_priv, state); mutex_lock(&id_priv->handler_mutex); /* Check for destruction from another callback. */ if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) goto out; memset(&event, 0, sizeof event); event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; ret = id_priv->id.event_handler(&id_priv->id, &event); out: mutex_unlock(&id_priv->handler_mutex); return ret; } static void cma_process_remove(struct cma_device *cma_dev) { struct rdma_id_private *id_priv; int ret; mutex_lock(&lock); while (!list_empty(&cma_dev->id_list)) { id_priv = list_entry(cma_dev->id_list.next, struct rdma_id_private, list); list_del(&id_priv->listen_list); list_del_init(&id_priv->list); atomic_inc(&id_priv->refcount); mutex_unlock(&lock); ret = id_priv->internal_id ? 
1 : cma_remove_id_dev(id_priv); cma_deref_id(id_priv); if (ret) rdma_destroy_id(&id_priv->id); mutex_lock(&lock); } mutex_unlock(&lock); cma_deref_dev(cma_dev); wait_for_completion(&cma_dev->comp); } static void cma_remove_one(struct ib_device *device) { struct cma_device *cma_dev; cma_dev = ib_get_client_data(device, &cma_client); if (!cma_dev) return; mutex_lock(&lock); list_del(&cma_dev->list); mutex_unlock(&lock); cma_process_remove(cma_dev); kfree(cma_dev); } static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) { struct nlmsghdr *nlh; struct rdma_cm_id_stats *id_stats; struct rdma_id_private *id_priv; struct rdma_cm_id *id = NULL; struct cma_device *cma_dev; int i_dev = 0, i_id = 0; /* * We export all of the IDs as a sequence of messages. Each * ID gets its own netlink message. */ mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) { if (i_dev < cb->args[0]) { i_dev++; continue; } i_id = 0; list_for_each_entry(id_priv, &cma_dev->id_list, list) { if (i_id < cb->args[1]) { i_id++; continue; } id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq, sizeof *id_stats, RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_ID_STATS); if (!id_stats) goto out; memset(id_stats, 0, sizeof *id_stats); id = &id_priv->id; id_stats->node_type = id->route.addr.dev_addr.dev_type; id_stats->port_num = id->port_num; id_stats->bound_dev_if = id->route.addr.dev_addr.bound_dev_if; if (ibnl_put_attr(skb, nlh, rdma_addr_size(cma_src_addr(id_priv)), cma_src_addr(id_priv), RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) goto out; if (ibnl_put_attr(skb, nlh, rdma_addr_size(cma_src_addr(id_priv)), cma_dst_addr(id_priv), RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) goto out; id_stats->pid = id_priv->owner; id_stats->port_space = id->ps; id_stats->cm_state = id_priv->state; id_stats->qp_num = id_priv->qp_num; id_stats->qp_type = id->qp_type; i_id++; } cb->args[1] = 0; i_dev++; } out: mutex_unlock(&lock); cb->args[0] = i_dev; cb->args[1] = i_id; return skb->len; } static const struct ibnl_client_cbs 
cma_cb_table[] = { [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats, .module = THIS_MODULE }, }; static int __init cma_init(void) { int ret; cma_wq = create_singlethread_workqueue("rdma_cm"); if (!cma_wq) return -ENOMEM; ib_sa_register_client(&sa_client); rdma_addr_register_client(&addr_client); register_netdevice_notifier(&cma_nb); ret = ib_register_client(&cma_client); if (ret) goto err; if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table)) printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n"); return 0; err: unregister_netdevice_notifier(&cma_nb); rdma_addr_unregister_client(&addr_client); ib_sa_unregister_client(&sa_client); destroy_workqueue(cma_wq); return ret; } static void __exit cma_cleanup(void) { ibnl_remove_client(RDMA_NL_RDMA_CM); ib_unregister_client(&cma_client); unregister_netdevice_notifier(&cma_nb); rdma_addr_unregister_client(&addr_client); ib_sa_unregister_client(&sa_client); destroy_workqueue(cma_wq); idr_destroy(&tcp_ps); idr_destroy(&udp_ps); idr_destroy(&ipoib_ps); idr_destroy(&ib_ps); } module_init(cma_init); module_exit(cma_cleanup);
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2117_1
crossvul-cpp_data_bad_3581_2
404: Not Found
./CrossVul/dataset_final_sorted/CWE-20/c/bad_3581_2
crossvul-cpp_data_good_5533_0
/* diskstore.c implements a very simple disk backed key-value store used * by Redis for the "disk" backend. This implementation uses the filesystem * to store key/value pairs. Every file represents a given key. * * The key path is calculated using the SHA1 of the key name. For instance * the key "foo" is stored as a file name called: * * /0b/ee/0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 * * The couples of characters from the hex output of SHA1 are also used * to locate two two levels of directories to store the file (as most * filesystems are not able to handle too many files in a single dir). * * In the end there are 65536 final directories (256 directories inside * every 256 top level directories), so that with 1 billion of files every * directory will contain in the average 15258 entires, that is ok with * most filesystems implementation. * * Note that since Redis supports multiple databases, the actual key name * is: * * /0b/ee/<dbid>_0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 * * so for instance if the key is inside DB 0: * * /0b/ee/0_0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33 * * The actaul implementation of this disk store is highly dependant to the * filesystem implementation itself. This implementation may be replaced by * a B+TREE implementation in future implementations. * * Data ok every key is serialized using the same format used for .rdb * serialization. Everything is serialized on every entry: key name, * ttl information in case of keys with an associated expire time, and the * serialized value itself. * * Because the format is the same of the .rdb files it is trivial to create * an .rdb file starting from this format just by mean of scanning the * directories and concatenating entries, with the sole addition of an * .rdb header at the start and the end-of-db opcode at the end. * * ------------------------------------------------------------------------- * * Copyright (c) 2010-2011, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "redis.h" #include <fcntl.h> #include <sys/stat.h> int dsOpen(void) { struct stat sb; int retval; char *path = server.ds_path; if ((retval = stat(path,&sb) == -1) && errno != ENOENT) { redisLog(REDIS_WARNING, "Error opening disk store at %s: %s", path, strerror(errno)); return REDIS_ERR; } /* Directory already in place. Assume everything is ok. 
*/ if (retval == 0 && S_ISDIR(sb.st_mode)) return REDIS_OK; /* File exists but it's not a directory */ if (retval == 0 && !S_ISDIR(sb.st_mode)) { redisLog(REDIS_WARNING,"Disk store at %s is not a directory", path); return REDIS_ERR; } /* New disk store, create the directory structure now, as creating * them in a lazy way is not a good idea, after very few insertions * we'll need most of the 65536 directories anyway. */ if (mkdir(path) == -1) { redisLog(REDIS_WARNING,"Disk store init failed creating dir %s: %s", path, strerror(errno)); return REDIS_ERR; } return REDIS_OK; } int dsClose(void) { return REDIS_OK; } int dsSet(redisDb *db, robj *key, robj *val) { } robj *dsGet(redisDb *db, robj *key) { } int dsExists(redisDb *db, robj *key) { }
./CrossVul/dataset_final_sorted/CWE-20/c/good_5533_0
crossvul-cpp_data_bad_2577_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M PPPP CCCC % % MM MM P P C % % M M M PPPP C % % M M P C % % M M P CCCC % % % % % % Read/Write Magick Persistant Cache Image Format % % % % Software Design % % Cristy % % March 2000 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/constitute.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/static.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/utility.h" #include "magick/version-private.h" /* Forward declarations. */ static MagickBooleanType WriteMPCImage(const ImageInfo *,Image *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M P C % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMPC() returns MagickTrue if the image format type, identified by the % magick string, is an Magick Persistent Cache image. % % The format of the IsMPC method is: % % MagickBooleanType IsMPC(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. 
% */ static MagickBooleanType IsMPC(const unsigned char *magick,const size_t length) { if (length < 14) return(MagickFalse); if (LocaleNCompare((const char *) magick,"id=MagickCache",14) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d C A C H E I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadMPCImage() reads an Magick Persistent Cache image file and returns % it. It allocates the memory necessary for the new Image structure and % returns a pointer to the new image. % % The format of the ReadMPCImage method is: % % Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception) % % Decompression code contributed by Kyle Shorter. % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadMPCImage(const ImageInfo *image_info,ExceptionInfo *exception) { char cache_filename[MaxTextExtent], id[MaxTextExtent], keyword[MaxTextExtent], *options; const unsigned char *p; GeometryInfo geometry_info; Image *image; int c; LinkedListInfo *profiles; MagickBooleanType status; MagickOffsetType offset; MagickStatusType flags; register ssize_t i; size_t depth, length; ssize_t count; StringInfo *profile; unsigned int signature; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } (void) CopyMagickString(cache_filename,image->filename,MaxTextExtent); AppendImageFormat("cache",cache_filename); c=ReadBlobByte(image); if (c == EOF) { image=DestroyImage(image); return((Image *) NULL); } *id='\0'; (void) ResetMagickMemory(keyword,0,sizeof(keyword)); offset=0; do { /* Decode image header; header terminates one character beyond a ':'. */ profiles=(LinkedListInfo *) NULL; length=MaxTextExtent; options=AcquireString((char *) NULL); signature=GetMagickSignature((const StringInfo *) NULL); image->depth=8; image->compression=NoCompression; while ((isgraph(c) != MagickFalse) && (c != (int) ':')) { register char *p; if (c == (int) '{') { char *comment; /* Read comment-- any text between { }. */ length=MaxTextExtent; comment=AcquireString((char *) NULL); for (p=comment; comment != (char *) NULL; p++) { c=ReadBlobByte(image); if (c == (int) '\\') c=ReadBlobByte(image); else if ((c == EOF) || (c == (int) '}')) break; if ((size_t) (p-comment+1) >= length) { *p='\0'; length<<=1; comment=(char *) ResizeQuantumMemory(comment,length+ MaxTextExtent,sizeof(*comment)); if (comment == (char *) NULL) break; p=comment+strlen(comment); } *p=(char) c; } if (comment == (char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); *p='\0'; (void) SetImageProperty(image,"comment",comment); comment=DestroyString(comment); c=ReadBlobByte(image); } else if (isalnum(c) != MagickFalse) { /* Get the keyword. 
*/ length=MaxTextExtent; p=keyword; do { if (c == (int) '=') break; if ((size_t) (p-keyword) < (MaxTextExtent-1)) *p++=(char) c; c=ReadBlobByte(image); } while (c != EOF); *p='\0'; p=options; while (isspace((int) ((unsigned char) c)) != 0) c=ReadBlobByte(image); if (c == (int) '=') { /* Get the keyword value. */ c=ReadBlobByte(image); while ((c != (int) '}') && (c != EOF)) { if ((size_t) (p-options+1) >= length) { *p='\0'; length<<=1; options=(char *) ResizeQuantumMemory(options,length+ MaxTextExtent,sizeof(*options)); if (options == (char *) NULL) break; p=options+strlen(options); } *p++=(char) c; c=ReadBlobByte(image); if (c == '\\') { c=ReadBlobByte(image); if (c == (int) '}') { *p++=(char) c; c=ReadBlobByte(image); } } if (*options != '{') if (isspace((int) ((unsigned char) c)) != 0) break; } if (options == (char *) NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); } *p='\0'; if (*options == '{') (void) CopyMagickString(options,options+1,strlen(options)); /* Assign a value to the specified keyword. 
*/ switch (*keyword) { case 'b': case 'B': { if (LocaleCompare(keyword,"background-color") == 0) { (void) QueryColorDatabase(options,&image->background_color, exception); break; } if (LocaleCompare(keyword,"blue-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y= image->chromaticity.blue_primary.x; break; } if (LocaleCompare(keyword,"border-color") == 0) { (void) QueryColorDatabase(options,&image->border_color, exception); break; } (void) SetImageProperty(image,keyword,options); break; } case 'c': case 'C': { if (LocaleCompare(keyword,"class") == 0) { ssize_t storage_class; storage_class=ParseCommandOption(MagickClassOptions, MagickFalse,options); if (storage_class < 0) break; image->storage_class=(ClassType) storage_class; break; } if (LocaleCompare(keyword,"colors") == 0) { image->colors=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"colorspace") == 0) { ssize_t colorspace; colorspace=ParseCommandOption(MagickColorspaceOptions, MagickFalse,options); if (colorspace < 0) break; image->colorspace=(ColorspaceType) colorspace; break; } if (LocaleCompare(keyword,"compression") == 0) { ssize_t compression; compression=ParseCommandOption(MagickCompressOptions, MagickFalse,options); if (compression < 0) break; image->compression=(CompressionType) compression; break; } if (LocaleCompare(keyword,"columns") == 0) { image->columns=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options); break; } case 'd': case 'D': { if (LocaleCompare(keyword,"delay") == 0) { image->delay=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"depth") == 0) { image->depth=StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"dispose") == 0) { ssize_t dispose; dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse, options); if 
(dispose < 0) break; image->dispose=(DisposeType) dispose; break; } (void) SetImageProperty(image,keyword,options); break; } case 'e': case 'E': { if (LocaleCompare(keyword,"endian") == 0) { ssize_t endian; endian=ParseCommandOption(MagickEndianOptions,MagickFalse, options); if (endian < 0) break; image->endian=(EndianType) endian; break; } if (LocaleCompare(keyword,"error") == 0) { image->error.mean_error_per_pixel=StringToDouble(options, (char **) NULL); break; } (void) SetImageProperty(image,keyword,options); break; } case 'g': case 'G': { if (LocaleCompare(keyword,"gamma") == 0) { image->gamma=StringToDouble(options,(char **) NULL); break; } if (LocaleCompare(keyword,"green-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y= image->chromaticity.green_primary.x; break; } (void) SetImageProperty(image,keyword,options); break; } case 'i': case 'I': { if (LocaleCompare(keyword,"id") == 0) { (void) CopyMagickString(id,options,MaxTextExtent); break; } if (LocaleCompare(keyword,"iterations") == 0) { image->iterations=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options); break; } case 'm': case 'M': { if (LocaleCompare(keyword,"magick-signature") == 0) { signature=(unsigned int) StringToUnsignedLong(options); break; } if (LocaleCompare(keyword,"matte") == 0) { ssize_t matte; matte=ParseCommandOption(MagickBooleanOptions,MagickFalse, options); if (matte < 0) break; image->matte=(MagickBooleanType) matte; break; } if (LocaleCompare(keyword,"matte-color") == 0) { (void) QueryColorDatabase(options,&image->matte_color, exception); break; } if (LocaleCompare(keyword,"maximum-error") == 0) { image->error.normalized_maximum_error=StringToDouble( options,(char **) NULL); break; } if (LocaleCompare(keyword,"mean-error") == 0) { 
image->error.normalized_mean_error=StringToDouble(options, (char **) NULL); break; } if (LocaleCompare(keyword,"montage") == 0) { (void) CloneString(&image->montage,options); break; } (void) SetImageProperty(image,keyword,options); break; } case 'o': case 'O': { if (LocaleCompare(keyword,"opaque") == 0) { ssize_t matte; matte=ParseCommandOption(MagickBooleanOptions,MagickFalse, options); if (matte < 0) break; image->matte=(MagickBooleanType) matte; break; } if (LocaleCompare(keyword,"orientation") == 0) { ssize_t orientation; orientation=ParseCommandOption(MagickOrientationOptions, MagickFalse,options); if (orientation < 0) break; image->orientation=(OrientationType) orientation; break; } (void) SetImageProperty(image,keyword,options); break; } case 'p': case 'P': { if (LocaleCompare(keyword,"page") == 0) { char *geometry; geometry=GetPageGeometry(options); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); break; } if (LocaleCompare(keyword,"pixel-intensity") == 0) { ssize_t intensity; intensity=ParseCommandOption(MagickPixelIntensityOptions, MagickFalse,options); if (intensity < 0) break; image->intensity=(PixelIntensityMethod) intensity; break; } if ((LocaleNCompare(keyword,"profile:",8) == 0) || (LocaleNCompare(keyword,"profile-",8) == 0)) { if (profiles == (LinkedListInfo *) NULL) profiles=NewLinkedList(0); (void) AppendValueToLinkedList(profiles, AcquireString(keyword+8)); profile=BlobToStringInfo((const void *) NULL,(size_t) StringToLong(options)); if (profile == (StringInfo *) NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); (void) SetImageProfile(image,keyword+8,profile); profile=DestroyStringInfo(profile); break; } (void) SetImageProperty(image,keyword,options); break; } case 'q': case 'Q': { if (LocaleCompare(keyword,"quality") == 0) { image->quality=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options); break; } case 'r': case 'R': { if 
(LocaleCompare(keyword,"red-primary") == 0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; if ((flags & SigmaValue) != 0) image->chromaticity.red_primary.y=geometry_info.sigma; break; } if (LocaleCompare(keyword,"rendering-intent") == 0) { ssize_t rendering_intent; rendering_intent=ParseCommandOption(MagickIntentOptions, MagickFalse,options); if (rendering_intent < 0) break; image->rendering_intent=(RenderingIntent) rendering_intent; break; } if (LocaleCompare(keyword,"resolution") == 0) { flags=ParseGeometry(options,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; break; } if (LocaleCompare(keyword,"rows") == 0) { image->rows=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options); break; } case 's': case 'S': { if (LocaleCompare(keyword,"scene") == 0) { image->scene=StringToUnsignedLong(options); break; } (void) SetImageProperty(image,keyword,options); break; } case 't': case 'T': { if (LocaleCompare(keyword,"ticks-per-second") == 0) { image->ticks_per_second=(ssize_t) StringToLong(options); break; } if (LocaleCompare(keyword,"tile-offset") == 0) { char *geometry; geometry=GetPageGeometry(options); (void) ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } if (LocaleCompare(keyword,"type") == 0) { ssize_t type; type=ParseCommandOption(MagickTypeOptions,MagickFalse, options); if (type < 0) break; image->type=(ImageType) type; break; } (void) SetImageProperty(image,keyword,options); break; } case 'u': case 'U': { if (LocaleCompare(keyword,"units") == 0) { ssize_t units; units=ParseCommandOption(MagickResolutionOptions,MagickFalse, options); if (units < 0) break; image->units=(ResolutionType) units; break; } (void) SetImageProperty(image,keyword,options); break; } case 'w': case 'W': { if (LocaleCompare(keyword,"white-point") == 
0) { flags=ParseGeometry(options,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y= image->chromaticity.white_point.x; break; } (void) SetImageProperty(image,keyword,options); break; } default: { (void) SetImageProperty(image,keyword,options); break; } } } else c=ReadBlobByte(image); while (isspace((int) ((unsigned char) c)) != 0) c=ReadBlobByte(image); } options=DestroyString(options); (void) ReadBlobByte(image); /* Verify that required image information is defined. */ if ((LocaleCompare(id,"MagickCache") != 0) || (image->storage_class == UndefinedClass) || (image->compression == UndefinedCompression) || (image->columns == 0) || (image->rows == 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (signature != GetMagickSignature((const StringInfo *) NULL)) ThrowReaderException(CacheError,"IncompatibleAPI"); if (image->montage != (char *) NULL) { register char *p; /* Image directory. */ length=MaxTextExtent; image->directory=AcquireString((char *) NULL); p=image->directory; do { *p='\0'; if ((strlen(image->directory)+MaxTextExtent) >= length) { /* Allocate more memory for the image directory. */ length<<=1; image->directory=(char *) ResizeQuantumMemory(image->directory, length+MaxTextExtent,sizeof(*image->directory)); if (image->directory == (char *) NULL) ThrowReaderException(CorruptImageError,"UnableToReadImageData"); p=image->directory+strlen(image->directory); } c=ReadBlobByte(image); *p++=(char) c; } while (c != (int) '\0'); } if (profiles != (LinkedListInfo *) NULL) { const char *name; const StringInfo *profile; register unsigned char *p; /* Read image profiles. 
*/ ResetLinkedListIterator(profiles); name=(const char *) GetNextValueInLinkedList(profiles); while (name != (const char *) NULL) { profile=GetImageProfile(image,name); if (profile != (StringInfo *) NULL) { p=GetStringInfoDatum(profile); (void) ReadBlob(image,GetStringInfoLength(profile),p); } name=(const char *) GetNextValueInLinkedList(profiles); } profiles=DestroyLinkedList(profiles,RelinquishMagickMemory); } depth=GetImageQuantumDepth(image,MagickFalse); if (image->storage_class == PseudoClass) { size_t packet_size; unsigned char *colormap; /* Create image colormap. */ packet_size=(size_t) (3UL*depth/8UL); if ((packet_size*image->colors) > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); image->colormap=(PixelPacket *) AcquireQuantumMemory(image->colors+1, sizeof(*image->colormap)); if (image->colormap == (PixelPacket *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->colors != 0) { /* Read image colormap from file. 
*/ colormap=(unsigned char *) AcquireQuantumMemory(image->colors, packet_size*sizeof(*colormap)); if (colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,packet_size*image->colors,colormap); if (count != (ssize_t) (packet_size*image->colors)) { colormap=(unsigned char *) RelinquishMagickMemory(colormap); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } p=colormap; switch (depth) { default: colormap=(unsigned char *) RelinquishMagickMemory(colormap); ThrowReaderException(CorruptImageError, "ImageDepthNotSupported"); case 8: { unsigned char pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushCharPixel(p,&pixel); image->colormap[i].red=ScaleCharToQuantum(pixel); p=PushCharPixel(p,&pixel); image->colormap[i].green=ScaleCharToQuantum(pixel); p=PushCharPixel(p,&pixel); image->colormap[i].blue=ScaleCharToQuantum(pixel); } break; } case 16: { unsigned short pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushShortPixel(MSBEndian,p,&pixel); image->colormap[i].red=ScaleShortToQuantum(pixel); p=PushShortPixel(MSBEndian,p,&pixel); image->colormap[i].green=ScaleShortToQuantum(pixel); p=PushShortPixel(MSBEndian,p,&pixel); image->colormap[i].blue=ScaleShortToQuantum(pixel); } break; } case 32: { unsigned int pixel; for (i=0; i < (ssize_t) image->colors; i++) { p=PushLongPixel(MSBEndian,p,&pixel); image->colormap[i].red=ScaleLongToQuantum(pixel); p=PushLongPixel(MSBEndian,p,&pixel); image->colormap[i].green=ScaleLongToQuantum(pixel); p=PushLongPixel(MSBEndian,p,&pixel); image->colormap[i].blue=ScaleLongToQuantum(pixel); } break; } } colormap=(unsigned char *) RelinquishMagickMemory(colormap); } } if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) 
break; if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) || (AcquireMagickResource(HeightResource,image->rows) == MagickFalse)) ThrowReaderException(ImageError,"WidthOrHeightExceedsLimit"); /* Attach persistent pixel cache. */ status=PersistPixelCache(image,cache_filename,MagickTrue,&offset,exception); if (status == MagickFalse) ThrowReaderException(CacheError,"UnableToPersistPixelCache"); /* Proceed to next image. */ do { c=ReadBlobByte(image); } while ((isgraph(c) == MagickFalse) && (c != EOF)); if (c != EOF) { /* Allocate next image structure. */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (c != EOF); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r M P C I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterMPCImage() adds properties for the Cache image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterMPCImage method is: % % size_t RegisterMPCImage(void) % */ ModuleExport size_t RegisterMPCImage(void) { MagickInfo *entry; entry=SetMagickInfo("CACHE"); entry->description=ConstantString("Magick Persistent Cache image format"); entry->module=ConstantString("MPC"); entry->stealth=MagickTrue; (void) RegisterMagickInfo(entry); entry=SetMagickInfo("MPC"); entry->decoder=(DecodeImageHandler *) ReadMPCImage; entry->encoder=(EncodeImageHandler *) WriteMPCImage; entry->magick=(IsImageFormatHandler *) IsMPC; entry->description=ConstantString("Magick Persistent Cache image format"); entry->module=ConstantString("MPC"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r M P C I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterMPCImage() removes format registrations made by the % MPC module from the list of supported formats. % % The format of the UnregisterMPCImage method is: % % UnregisterMPCImage(void) % */ ModuleExport void UnregisterMPCImage(void) { (void) UnregisterMagickInfo("CACHE"); (void) UnregisterMagickInfo("MPC"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e M P C I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteMPCImage() writes an Magick Persistent Cache image to a file. % % The format of the WriteMPCImage method is: % % MagickBooleanType WriteMPCImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. 
%
*/
static MagickBooleanType WriteMPCImage(const ImageInfo *image_info,Image *image)
{
  char
    buffer[MaxTextExtent],
    cache_filename[MaxTextExtent];

  const char
    *property,
    *value;

  MagickBooleanType
    status;

  MagickOffsetType
    offset,
    scene;

  register ssize_t
    i;

  size_t
    depth,
    one;

  /*
    Open persistent cache.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  /*
    Pixel data is persisted separately to "<filename>.cache"; this blob
    receives only the textual header, colormap, and profile payloads.
  */
  (void) CopyMagickString(cache_filename,image->filename,MaxTextExtent);
  AppendImageFormat("cache",cache_filename);
  scene=0;
  offset=0;
  one=1;
  do
  {
    /*
      Write persistent cache meta-information.
    */
    depth=GetImageQuantumDepth(image,MagickTrue);
    /* Demote to DirectClass when the palette cannot be indexed at this
       depth (more colors than one << depth). */
    if ((image->storage_class == PseudoClass) &&
        (image->colors > (one << depth)))
      image->storage_class=DirectClass;
    (void) WriteBlobString(image,"id=MagickCache\n");
    /* The signature pins the writer's API/quantum configuration; the
       reader rejects files whose signature differs. */
    (void) FormatLocaleString(buffer,MaxTextExtent,"magick-signature=%u\n",
      GetMagickSignature((const StringInfo *) NULL));
    (void) WriteBlobString(image,buffer);
    (void) FormatLocaleString(buffer,MaxTextExtent,
      "class=%s colors=%.20g matte=%s\n",CommandOptionToMnemonic(
      MagickClassOptions,image->storage_class),(double) image->colors,
      CommandOptionToMnemonic(MagickBooleanOptions,(ssize_t) image->matte));
    (void) WriteBlobString(image,buffer);
    (void) FormatLocaleString(buffer,MaxTextExtent,
      "columns=%.20g rows=%.20g depth=%.20g\n",(double) image->columns,
      (double) image->rows,(double) image->depth);
    (void) WriteBlobString(image,buffer);
    /* Optional attributes: each is emitted only when set to a defined
       (non-default) value, mirroring the reader's keyword parser. */
    if (image->type != UndefinedType)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"type=%s\n",
          CommandOptionToMnemonic(MagickTypeOptions,image->type));
        (void) WriteBlobString(image,buffer);
      }
    if (image->colorspace != UndefinedColorspace)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"colorspace=%s\n",
          CommandOptionToMnemonic(MagickColorspaceOptions,image->colorspace));
        (void) WriteBlobString(image,buffer);
      }
    if (image->intensity != UndefinedPixelIntensityMethod)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"pixel-intensity=%s\n",
          CommandOptionToMnemonic(MagickPixelIntensityOptions,
          image->intensity));
        (void) WriteBlobString(image,buffer);
      }
    if (image->endian != UndefinedEndian)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"endian=%s\n",
          CommandOptionToMnemonic(MagickEndianOptions,image->endian));
        (void) WriteBlobString(image,buffer);
      }
    if (image->compression != UndefinedCompression)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,
          "compression=%s quality=%.20g\n",CommandOptionToMnemonic(
          MagickCompressOptions,image->compression),(double) image->quality);
        (void) WriteBlobString(image,buffer);
      }
    if (image->units != UndefinedResolution)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"units=%s\n",
          CommandOptionToMnemonic(MagickResolutionOptions,image->units));
        (void) WriteBlobString(image,buffer);
      }
    if ((image->x_resolution != 0) || (image->y_resolution != 0))
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,
          "resolution=%gx%g\n",image->x_resolution,image->y_resolution);
        (void) WriteBlobString(image,buffer);
      }
    /* Full page geometry when a canvas size exists; otherwise just the
       offsets when only they are non-zero. */
    if ((image->page.width != 0) || (image->page.height != 0))
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,
          "page=%.20gx%.20g%+.20g%+.20g\n",(double) image->page.width,(double)
          image->page.height,(double) image->page.x,(double) image->page.y);
        (void) WriteBlobString(image,buffer);
      }
    else
      if ((image->page.x != 0) || (image->page.y != 0))
        {
          (void) FormatLocaleString(buffer,MaxTextExtent,"page=%+ld%+ld\n",
            (long) image->page.x,(long) image->page.y);
          (void) WriteBlobString(image,buffer);
        }
    if ((image->tile_offset.x != 0) || (image->tile_offset.y != 0))
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"tile-offset=%+ld%+ld\n",
          (long) image->tile_offset.x,(long) image->tile_offset.y);
        (void) WriteBlobString(image,buffer);
      }
    /* Animation attributes: combined on one line for multi-frame lists,
       written individually (and only if non-default) for single images. */
    if ((GetNextImageInList(image) != (Image *) NULL) ||
        (GetPreviousImageInList(image) != (Image *) NULL))
      {
        if (image->scene == 0)
          (void) FormatLocaleString(buffer,MaxTextExtent,
            "iterations=%.20g delay=%.20g ticks-per-second=%.20g\n",(double)
            image->iterations,(double) image->delay,(double)
            image->ticks_per_second);
        else
          (void) FormatLocaleString(buffer,MaxTextExtent,"scene=%.20g "
            "iterations=%.20g delay=%.20g ticks-per-second=%.20g\n",
            (double) image->scene,(double) image->iterations,(double)
            image->delay,(double) image->ticks_per_second);
        (void) WriteBlobString(image,buffer);
      }
    else
      {
        if (image->scene != 0)
          {
            (void) FormatLocaleString(buffer,MaxTextExtent,"scene=%.20g\n",
              (double) image->scene);
            (void) WriteBlobString(image,buffer);
          }
        if (image->iterations != 0)
          {
            (void) FormatLocaleString(buffer,MaxTextExtent,"iterations=%.20g\n",
              (double) image->iterations);
            (void) WriteBlobString(image,buffer);
          }
        if (image->delay != 0)
          {
            (void) FormatLocaleString(buffer,MaxTextExtent,"delay=%.20g\n",
              (double) image->delay);
            (void) WriteBlobString(image,buffer);
          }
        if (image->ticks_per_second != UndefinedTicksPerSecond)
          {
            (void) FormatLocaleString(buffer,MaxTextExtent,
              "ticks-per-second=%.20g\n",(double) image->ticks_per_second);
            (void) WriteBlobString(image,buffer);
          }
      }
    if (image->gravity != UndefinedGravity)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"gravity=%s\n",
          CommandOptionToMnemonic(MagickGravityOptions,image->gravity));
        (void) WriteBlobString(image,buffer);
      }
    if (image->dispose != UndefinedDispose)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"dispose=%s\n",
          CommandOptionToMnemonic(MagickDisposeOptions,image->dispose));
        (void) WriteBlobString(image,buffer);
      }
    if (image->rendering_intent != UndefinedIntent)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,
          "rendering-intent=%s\n",CommandOptionToMnemonic(MagickIntentOptions,
          image->rendering_intent));
        (void) WriteBlobString(image,buffer);
      }
    if (image->gamma != 0.0)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"gamma=%g\n",
          image->gamma);
        (void) WriteBlobString(image,buffer);
      }
    if (image->chromaticity.white_point.x != 0.0)
      {
        /*
          Note chomaticity points.
        */
        (void) FormatLocaleString(buffer,MaxTextExtent,"red-primary="
          "%g,%g green-primary=%g,%g blue-primary=%g,%g\n",
          image->chromaticity.red_primary.x,image->chromaticity.red_primary.y,
          image->chromaticity.green_primary.x,
          image->chromaticity.green_primary.y,
          image->chromaticity.blue_primary.x,
          image->chromaticity.blue_primary.y);
        (void) WriteBlobString(image,buffer);
        (void) FormatLocaleString(buffer,MaxTextExtent,
          "white-point=%g,%g\n",image->chromaticity.white_point.x,
          image->chromaticity.white_point.y);
        (void) WriteBlobString(image,buffer);
      }
    if (image->orientation != UndefinedOrientation)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,
          "orientation=%s\n",CommandOptionToMnemonic(MagickOrientationOptions,
          image->orientation));
        (void) WriteBlobString(image,buffer);
      }
    if (image->profiles != (void *) NULL)
      {
        const char
          *name;

        const StringInfo
          *profile;

        /*
          Generic profile.  Only the name and byte length are written
          here; the raw profile bytes follow the header (below).
        */
        ResetImageProfileIterator(image);
        for (name=GetNextImageProfile(image); name != (const char *) NULL; )
        {
          profile=GetImageProfile(image,name);
          if (profile != (StringInfo *) NULL)
            {
              (void) FormatLocaleString(buffer,MaxTextExtent,
                "profile:%s=%.20g\n",name,(double)
                GetStringInfoLength(profile));
              (void) WriteBlobString(image,buffer);
            }
          name=GetNextImageProfile(image);
        }
      }
    if (image->montage != (char *) NULL)
      {
        (void) FormatLocaleString(buffer,MaxTextExtent,"montage=%s\n",
          image->montage);
        (void) WriteBlobString(image,buffer);
      }
    /* Free-form properties: a value containing whitespace (or empty) is
       wrapped in {...}, with any '}' escaped by a backslash so the
       reader can find the closing brace. */
    ResetImagePropertyIterator(image);
    property=GetNextImageProperty(image);
    while (property != (const char *) NULL)
    {
      (void) FormatLocaleString(buffer,MaxTextExtent,"%s=",property);
      (void) WriteBlobString(image,buffer);
      value=GetImageProperty(image,property);
      if (value != (const char *) NULL)
        {
          size_t
            length;

          length=strlen(value);
          for (i=0; i < (ssize_t) length; i++)
            if (isspace((int) ((unsigned char) value[i])) != 0)
              break;
          if ((i == (ssize_t) length) && (i != 0))
            (void) WriteBlob(image,length,(const unsigned char *) value);
          else
            {
              (void) WriteBlobByte(image,'{');
              if (strchr(value,'}') == (char *) NULL)
                (void) WriteBlob(image,length,(const unsigned char *) value);
              else
                for (i=0; i < (ssize_t) length; i++)
                {
                  if (value[i] == (int) '}')
                    (void) WriteBlobByte(image,'\\');
                  (void) WriteBlobByte(image,value[i]);
                }
              (void) WriteBlobByte(image,'}');
            }
        }
      (void) WriteBlobByte(image,'\n');
      property=GetNextImageProperty(image);
    }
    /* End-of-header sentinel the reader scans for. */
    (void) WriteBlobString(image,"\f\n:\032");
    if (image->montage != (char *) NULL)
      {
        /*
          Write montage tile directory.
        */
        if (image->directory != (char *) NULL)
          (void) WriteBlobString(image,image->directory);
        (void) WriteBlobByte(image,'\0');
      }
    if (image->profiles != 0)
      {
        const char
          *name;

        const StringInfo
          *profile;

        /*
          Write image profiles.
          NOTE(review): unlike the header loop above, this loop does not
          NULL-check GetImageProfile() before dereferencing the result —
          presumably the iterator only yields existing profiles; confirm.
        */
        ResetImageProfileIterator(image);
        name=GetNextImageProfile(image);
        while (name != (const char *) NULL)
        {
          profile=GetImageProfile(image,name);
          (void) WriteBlob(image,GetStringInfoLength(profile),
            GetStringInfoDatum(profile));
          name=GetNextImageProfile(image);
        }
      }
    if (image->storage_class == PseudoClass)
      {
        size_t
          packet_size;

        unsigned char
          *colormap,
          *q;

        /*
          Allocate colormap.  Three channels (RGB) at depth bits each.
        */
        packet_size=(size_t) (3UL*depth/8UL);
        colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
          packet_size*sizeof(*colormap));
        if (colormap == (unsigned char *) NULL)
          return(MagickFalse);
        /*
          Write colormap to file.  Samples are big-endian at the image's
          quantum depth (8/16/32 bits per channel).
        */
        q=colormap;
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          switch (depth)
          {
            default:
              ThrowWriterException(CorruptImageError,
                "ImageDepthNotSupported");
            case 32:
            {
              unsigned int
                pixel;

              pixel=ScaleQuantumToLong(image->colormap[i].red);
              q=PopLongPixel(MSBEndian,pixel,q);
              pixel=ScaleQuantumToLong(image->colormap[i].green);
              q=PopLongPixel(MSBEndian,pixel,q);
              pixel=ScaleQuantumToLong(image->colormap[i].blue);
              q=PopLongPixel(MSBEndian,pixel,q);
              break;
            }
            case 16:
            {
              unsigned short
                pixel;

              pixel=ScaleQuantumToShort(image->colormap[i].red);
              q=PopShortPixel(MSBEndian,pixel,q);
              pixel=ScaleQuantumToShort(image->colormap[i].green);
              q=PopShortPixel(MSBEndian,pixel,q);
              pixel=ScaleQuantumToShort(image->colormap[i].blue);
              q=PopShortPixel(MSBEndian,pixel,q);
              break;
            }
            case 8:
            {
              unsigned char
                pixel;

              pixel=(unsigned char) ScaleQuantumToChar(image->colormap[i].red);
              q=PopCharPixel(pixel,q);
              pixel=(unsigned char) ScaleQuantumToChar(
                image->colormap[i].green);
              q=PopCharPixel(pixel,q);
              pixel=(unsigned char) ScaleQuantumToChar(image->colormap[i].blue);
              q=PopCharPixel(pixel,q);
              break;
            }
          }
        }
        (void) WriteBlob(image,packet_size*image->colors,colormap);
        colormap=(unsigned char *) RelinquishMagickMemory(colormap);
      }
    /*
      Initialize persistent pixel cache.
    */
    status=PersistPixelCache(image,cache_filename,MagickFalse,&offset,
      &image->exception);
    if (status == MagickFalse)
      ThrowWriterException(CacheError,"UnableToPersistPixelCache");
    if (GetNextImageInList(image) == (Image *) NULL)
      break;
    image=SyncNextImageInList(image);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        status=image->progress_monitor(SaveImagesTag,scene,
          GetImageListLength(image),image->client_data);
        if (status == MagickFalse)
          break;
      }
    scene++;
  } while (image_info->adjoin != MagickFalse);
  (void) CloseBlob(image);
  return(status);
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2577_0
crossvul-cpp_data_bad_5526_1
/* -*- C -*-
 * $Id$
 */

#include <ruby.h>
#include "dl.h"

VALUE rb_cDLHandle;

/*
 * GC finalizer for DL::Handle.  Calls dlclose() only when the handle
 * is still open AND close was explicitly enabled via #enable_close,
 * so handles to libraries that must stay mapped are left alone.
 */
void
dlhandle_free(struct dl_handle *dlhandle)
{
    if( dlhandle->ptr && dlhandle->open && dlhandle->enable_close ){
	dlclose(dlhandle->ptr);
    }
}

/*
 * call-seq: close
 *
 * Marks the handle closed and returns the dlclose() status as an
 * Integer (0 on success).  Subsequent sym() calls raise DL::DLError.
 */
VALUE
rb_dlhandle_close(VALUE self)
{
    struct dl_handle *dlhandle;

    Data_Get_Struct(self, struct dl_handle, dlhandle);
    dlhandle->open = 0;
    return INT2NUM(dlclose(dlhandle->ptr));
}

/*
 * Allocator: wraps a zeroed struct dl_handle; the real dlopen()
 * happens in #initialize.
 */
VALUE
rb_dlhandle_s_allocate(VALUE klass)
{
    VALUE obj;
    struct dl_handle *dlhandle;

    obj = Data_Make_Struct(rb_cDLHandle, struct dl_handle, 0,
			   dlhandle_free, dlhandle);
    dlhandle->ptr  = 0;
    dlhandle->open = 0;
    dlhandle->enable_close = 0;

    return obj;
}

/*
 * call-seq:
 *    initialize(lib = nil, flags = RTLD_LAZY|RTLD_GLOBAL)
 *
 * Opens +lib+ with dlopen() (nil opens the main program image).
 * Raises DL::DLError when dlopen() fails, and SecurityError when a
 * tainted library name is supplied under $SAFE >= 1.  If a block is
 * given it is yielded and the handle is closed afterwards.
 */
VALUE
rb_dlhandle_initialize(int argc, VALUE argv[], VALUE self)
{
    void *ptr;
    struct dl_handle *dlhandle;
    VALUE lib, flag;
    char  *clib;
    int   cflag;
    const char *err;

    switch( rb_scan_args(argc, argv, "02", &lib, &flag) ){
      case 0:
	clib = NULL;
	cflag = RTLD_LAZY | RTLD_GLOBAL;
	break;
      case 1:
	if( NIL_P(lib) ){
	    clib = NULL;
	}
	else{
	    /*
	     * SECURITY FIX (CWE-20): reject tainted library names under
	     * $SAFE >= 1 before they reach dlopen(); the previous code
	     * used a bare StringValuePtr() and would dlopen() any
	     * attacker-influenced path.
	     */
	    SafeStringValue(lib);
	    clib = StringValuePtr(lib);
	}
	cflag = RTLD_LAZY | RTLD_GLOBAL;
	break;
      case 2:
	if( NIL_P(lib) ){
	    clib = NULL;
	}
	else{
	    SafeStringValue(lib);
	    clib = StringValuePtr(lib);
	}
	cflag = NUM2INT(flag);
	break;
      default:
	rb_bug("rb_dlhandle_new");
    }

    ptr = dlopen(clib, cflag);
#if defined(HAVE_DLERROR)
    if( !ptr && (err = dlerror()) ){
	rb_raise(rb_eDLError, "%s", err);
    }
#else
    if( !ptr ){
	err = dlerror();
	rb_raise(rb_eDLError, "%s", err);
    }
#endif
    Data_Get_Struct(self, struct dl_handle, dlhandle);
    /* Re-initialization: close a previously opened handle first (only
       if close was enabled), so we do not leak the old mapping. */
    if( dlhandle->ptr && dlhandle->open && dlhandle->enable_close ){
	dlclose(dlhandle->ptr);
    }
    dlhandle->ptr = ptr;
    dlhandle->open = 1;
    dlhandle->enable_close = 0;

    if( rb_block_given_p() ){
	rb_ensure(rb_yield, self, rb_dlhandle_close, self);
    }

    return Qnil;
}

/*
 * call-seq: enable_close
 *
 * Allows the GC finalizer (and #initialize re-open) to dlclose() this
 * handle.  Returns nil.
 */
VALUE
rb_dlhandle_enable_close(VALUE self)
{
    struct dl_handle *dlhandle;

    Data_Get_Struct(self, struct dl_handle, dlhandle);
    dlhandle->enable_close = 1;
    return Qnil;
}

/*
 * call-seq: disable_close
 *
 * Forbids implicit dlclose() of this handle (the default).  Returns nil.
 */
VALUE
rb_dlhandle_disable_close(VALUE self)
{
    struct dl_handle *dlhandle;

    Data_Get_Struct(self, struct dl_handle, dlhandle);
    dlhandle->enable_close = 0;
    return Qnil;
}

/*
 * call-seq: to_i
 *
 * Returns the address of the internal dl_handle struct as an Integer
 * (note: the wrapper struct, not the raw dlopen() handle — kept for
 * compatibility with existing callers).
 */
VALUE
rb_dlhandle_to_i(VALUE self)
{
    struct dl_handle *dlhandle;

    Data_Get_Struct(self, struct dl_handle, dlhandle);
    return PTR2NUM(dlhandle);
}

/*
 * call-seq: sym(name)  (aliased as [])
 *
 * Looks up symbol +name+ with dlsym() and returns its address as an
 * Integer.  On stdcall platforms also probes the decorated forms
 * "name@N" / "nameA" / "nameA@N".  Raises DL::DLError for a closed
 * handle or an unknown symbol; requires $SAFE < 2 (rb_secure).
 */
VALUE
rb_dlhandle_sym(VALUE self, VALUE sym)
{
    void (*func)();
    struct dl_handle *dlhandle;
    void *handle;
    const char *name;
    const char *err;
    int i;

#if defined(HAVE_DLERROR)
# define CHECK_DLERROR if( err = dlerror() ){ func = 0; }
#else
# define CHECK_DLERROR
#endif

    rb_secure(2);

    name = StringValuePtr(sym);

    Data_Get_Struct(self, struct dl_handle, dlhandle);
    if( ! dlhandle->open ){
	rb_raise(rb_eDLError, "closed handle");
    }
    handle = dlhandle->ptr;

    func = dlsym(handle, name);
    CHECK_DLERROR;
#if defined(FUNC_STDCALL)
    if( !func ){
	/* Probe Win32 stdcall name decorations: name@0..name@252 and the
	   ANSI 'A' suffix variants. */
	int len = strlen(name);
	char *name_n;
#if defined(__CYGWIN__) || defined(_WIN32) || defined(__MINGW32__)
	{
	    char *name_a = (char*)xmalloc(len+2);
	    strcpy(name_a, name);
	    name_n = name_a;
	    name_a[len]   = 'A';
	    name_a[len+1] = '\0';
	    func = dlsym(handle, name_a);
	    CHECK_DLERROR;
	    if( func ) goto found;
	    name_n = xrealloc(name_a, len+6);
	}
#else
	name_n = (char*)xmalloc(len+6);
#endif
	memcpy(name_n, name, len);
	name_n[len++] = '@';
	for( i = 0; i < 256; i += 4 ){
	    sprintf(name_n + len, "%d", i);
	    func = dlsym(handle, name_n);
	    CHECK_DLERROR;
	    if( func ) break;
	}
	if( func ) goto found;
	name_n[len-1] = 'A';
	name_n[len++] = '@';
	for( i = 0; i < 256; i += 4 ){
	    sprintf(name_n + len, "%d", i);
	    func = dlsym(handle, name_n);
	    CHECK_DLERROR;
	    if( func ) break;
	}
      found:
	xfree(name_n);
    }
#endif
    if( !func ){
	rb_raise(rb_eDLError, "unknown symbol \"%s\"", name);
    }

    return PTR2NUM(func);
}

/*
 * Registers the DL::Handle class and its methods.
 */
void
Init_dlhandle()
{
    rb_cDLHandle = rb_define_class_under(rb_mDL, "Handle", rb_cObject);
    rb_define_alloc_func(rb_cDLHandle, rb_dlhandle_s_allocate);
    rb_define_method(rb_cDLHandle, "initialize", rb_dlhandle_initialize, -1);
    rb_define_method(rb_cDLHandle, "to_i", rb_dlhandle_to_i, 0);
    rb_define_method(rb_cDLHandle, "close", rb_dlhandle_close, 0);
    rb_define_method(rb_cDLHandle, "sym",  rb_dlhandle_sym, 1);
    rb_define_method(rb_cDLHandle, "[]",  rb_dlhandle_sym,  1);
    rb_define_method(rb_cDLHandle, "disable_close", rb_dlhandle_disable_close, 0);
    rb_define_method(rb_cDLHandle, "enable_close", rb_dlhandle_enable_close, 0);
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5526_1
crossvul-cpp_data_bad_5586_0
#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <linux/module.h>
#include <net/sock.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

/* Per-family diag handlers; indexed by sdiag_family, which MUST be
 * validated against AF_MAX before use (see __sock_diag_rcv_msg). */
static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
/* Legacy inet entry point for TCPDIAG_GETSOCK/DCCPDIAG_GETSOCK. */
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);

/*
 * Verify a userspace-supplied socket cookie against @sk.  The
 * all-NOCOOKIE pair is a wildcard; otherwise both 32-bit halves of the
 * kernel pointer must match.  Returns 0 on match, -ESTALE otherwise.
 */
int sock_diag_check_cookie(void *sk, __u32 *cookie)
{
	if ((cookie[0] != INET_DIAG_NOCOOKIE ||
	     cookie[1] != INET_DIAG_NOCOOKIE) &&
	    ((u32)(unsigned long)sk != cookie[0] ||
	     (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
		return -ESTALE;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(sock_diag_check_cookie);

/* Encode @sk's address into the two-word cookie (double shift avoids
 * UB on 32-bit where a single >>32 would be out of range). */
void sock_diag_save_cookie(void *sk, __u32 *cookie)
{
	cookie[0] = (u32)(unsigned long)sk;
	cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
}
EXPORT_SYMBOL_GPL(sock_diag_save_cookie);

/* Append an SK_MEMINFO attribute describing @sk's memory accounting. */
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
{
	u32 mem[SK_MEMINFO_VARS];

	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
	mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
	mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
	mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
	mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;

	return nla_put(skb, attrtype, sizeof(mem), &mem);
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);

void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb,
					      struct nlmsghdr *nlh))
{
	mutex_lock(&sock_diag_table_mutex);
	inet_rcv_compat = fn;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat);

void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb,
						struct nlmsghdr *nlh))
{
	mutex_lock(&sock_diag_table_mutex);
	inet_rcv_compat = NULL;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);

/* Register @hndl for its address family; -EBUSY if one is installed. */
int sock_diag_register(const struct sock_diag_handler *hndl)
{
	int err = 0;

	if (hndl->family >= AF_MAX)
		return -EINVAL;

	mutex_lock(&sock_diag_table_mutex);
	if (sock_diag_handlers[hndl->family])
		err = -EBUSY;
	else
		sock_diag_handlers[hndl->family] = hndl;
	mutex_unlock(&sock_diag_table_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(sock_diag_register);

void sock_diag_unregister(const struct sock_diag_handler *hnld)
{
	int family = hnld->family;

	if (family >= AF_MAX)
		return;

	mutex_lock(&sock_diag_table_mutex);
	BUG_ON(sock_diag_handlers[family] != hnld);
	sock_diag_handlers[family] = NULL;
	mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);

/*
 * Look up the handler for @family, auto-loading the module on first
 * use, and return it with sock_diag_table_mutex held (released by
 * sock_diag_unlock_handler).  @family must already be < AF_MAX.
 * Note: dropped the misplaced "inline" from the old
 * "static const inline" specifier soup.
 */
static const struct sock_diag_handler *sock_diag_lock_handler(int family)
{
	if (sock_diag_handlers[family] == NULL)
		request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				NETLINK_SOCK_DIAG, family);

	mutex_lock(&sock_diag_table_mutex);
	return sock_diag_handlers[family];
}

static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
{
	mutex_unlock(&sock_diag_table_mutex);
}

/* Dispatch a SOCK_DIAG_BY_FAMILY request to the per-family handler. */
static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int err;
	struct sock_diag_req *req = nlmsg_data(nlh);
	const struct sock_diag_handler *hndl;

	if (nlmsg_len(nlh) < sizeof(*req))
		return -EINVAL;

	/*
	 * SECURITY FIX (CVE-2013-1763): sdiag_family comes straight from
	 * userspace and indexes sock_diag_handlers[AF_MAX]; without this
	 * check a value >= AF_MAX reads past the array (and feeds the
	 * out-of-bounds value to request_module()).
	 */
	if (req->sdiag_family >= AF_MAX)
		return -EINVAL;

	hndl = sock_diag_lock_handler(req->sdiag_family);
	if (hndl == NULL)
		err = -ENOENT;
	else
		err = hndl->dump(skb, nlh);
	sock_diag_unlock_handler(hndl);

	return err;
}

static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int ret;

	switch (nlh->nlmsg_type) {
	case TCPDIAG_GETSOCK:
	case DCCPDIAG_GETSOCK:
		/* Legacy inet_diag message types: route through the
		 * compat hook, loading the inet module on demand. */
		if (inet_rcv_compat == NULL)
			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
					NETLINK_SOCK_DIAG, AF_INET);

		mutex_lock(&sock_diag_table_mutex);
		if (inet_rcv_compat != NULL)
			ret = inet_rcv_compat(skb, nlh);
		else
			ret = -EOPNOTSUPP;
		mutex_unlock(&sock_diag_table_mutex);

		return ret;
	case SOCK_DIAG_BY_FAMILY:
		return __sock_diag_rcv_msg(skb, nlh);
	default:
		return -EINVAL;
	}
}

static DEFINE_MUTEX(sock_diag_mutex);

/* Netlink input callback: serialize all diag requests. */
static void sock_diag_rcv(struct sk_buff *skb)
{
	mutex_lock(&sock_diag_mutex);
	netlink_rcv_skb(skb, &sock_diag_rcv_msg);
	mutex_unlock(&sock_diag_mutex);
}

static int __net_init diag_net_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input	= sock_diag_rcv,
	};

	net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg);
	return net->diag_nlsk == NULL ? -ENOMEM : 0;
}

static void __net_exit diag_net_exit(struct net *net)
{
	netlink_kernel_release(net->diag_nlsk);
	net->diag_nlsk = NULL;
}

static struct pernet_operations diag_net_ops = {
	.init = diag_net_init,
	.exit = diag_net_exit,
};

static int __init sock_diag_init(void)
{
	return register_pernet_subsys(&diag_net_ops);
}

static void __exit sock_diag_exit(void)
{
	unregister_pernet_subsys(&diag_net_ops);
}

module_init(sock_diag_init);
module_exit(sock_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5586_0
crossvul-cpp_data_bad_3070_1
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <stdio.h>
#include <limits.h>
#include <errno.h>
#define USE_SOCKETS
#include "../ssl_locl.h"
#include <openssl/evp.h>
#include <openssl/buffer.h>
#include <openssl/rand.h>
#include "record_locl.h"

/*
 * The TLS1.1 multiblock optimisation is only wired up for x86_64 AES
 * assembler builds; everywhere else force the flag to 0 so the multiblock
 * code paths below compile away.
 */
#if defined(OPENSSL_SMALL_FOOTPRINT) || \
    !( defined(AES_ASM) && ( \
       defined(__x86_64) || defined(__x86_64__) || \
       defined(_M_AMD64) || defined(_M_X64) ) \
    )
# undef EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK
# define EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK 0
#endif

/* Initialise the record layer for a freshly created SSL object. */
void RECORD_LAYER_init(RECORD_LAYER *rl, SSL *s)
{
    rl->s = s;
    RECORD_LAYER_set_first_record(&s->rlayer);
    SSL3_RECORD_clear(rl->rrec, SSL_MAX_PIPELINES);
}

/*
 * Reset the record layer to its post-init state: drop any partially read
 * packet, pending-write bookkeeping, stored alert/handshake fragments and
 * read/write sequence numbers.  Frees the write buffer(s) as a side effect.
 */
void RECORD_LAYER_clear(RECORD_LAYER *rl)
{
    rl->rstate = SSL_ST_READ_HEADER;

    /*
     * Do I need to clear read_ahead? As far as I can tell read_ahead did not
     * previously get reset by SSL_clear...so I'll keep it that way..but is
     * that right?
     */

    rl->packet = NULL;
    rl->packet_length = 0;
    rl->wnum = 0;
    memset(rl->alert_fragment, 0, sizeof(rl->alert_fragment));
    rl->alert_fragment_len = 0;
    memset(rl->handshake_fragment, 0, sizeof(rl->handshake_fragment));
    rl->handshake_fragment_len = 0;
    rl->wpend_tot = 0;
    rl->wpend_type = 0;
    rl->wpend_ret = 0;
    rl->wpend_buf = NULL;

    SSL3_BUFFER_clear(&rl->rbuf);
    ssl3_release_write_buffer(rl->s);
    rl->numrpipes = 0;
    SSL3_RECORD_clear(rl->rrec, SSL_MAX_PIPELINES);

    RECORD_LAYER_reset_read_sequence(rl);
    RECORD_LAYER_reset_write_sequence(rl);

    /* rl->d is only non-NULL for DTLS connections */
    if (rl->d)
        DTLS_RECORD_LAYER_clear(rl);
}

/* Free the record layer's read/write buffers and record storage. */
void RECORD_LAYER_release(RECORD_LAYER *rl)
{
    if (SSL3_BUFFER_is_initialised(&rl->rbuf))
        ssl3_release_read_buffer(rl->s);
    if (rl->numwpipes > 0)
        ssl3_release_write_buffer(rl->s);
    SSL3_RECORD_release(rl->rrec, SSL_MAX_PIPELINES);
}

/* Return non-zero if there is unprocessed data left in the read buffer. */
int RECORD_LAYER_read_pending(const RECORD_LAYER *rl)
{
    return SSL3_BUFFER_get_left(&rl->rbuf) != 0;
}

/* Return non-zero if the last write buffer still has unsent bytes. */
int RECORD_LAYER_write_pending(const RECORD_LAYER *rl)
{
    return (rl->numwpipes > 0)
        && SSL3_BUFFER_get_left(&rl->wbuf[rl->numwpipes - 1]) != 0;
}

/*
 * Load |len| bytes from |buf| into the read buffer as if they had been read
 * from the network (used e.g. when a record was received out of band).
 * Returns 1 on success, 0 if the read buffer could not be set up.
 */
int RECORD_LAYER_set_data(RECORD_LAYER *rl, const unsigned char *buf, int len)
{
    rl->packet_length = len;
    if (len != 0) {
        rl->rstate = SSL_ST_READ_HEADER;
        if (!SSL3_BUFFER_is_initialised(&rl->rbuf))
            if (!ssl3_setup_read_buffer(rl->s))
                return 0;
    }
    rl->packet = SSL3_BUFFER_get_buf(&rl->rbuf);
    SSL3_BUFFER_set_data(&rl->rbuf, buf, len);

    return 1;
}

/* Zero the 8-byte read sequence number. */
void RECORD_LAYER_reset_read_sequence(RECORD_LAYER *rl)
{
    memset(rl->read_sequence, 0, sizeof(rl->read_sequence));
}

/* Zero the 8-byte write sequence number. */
void RECORD_LAYER_reset_write_sequence(RECORD_LAYER *rl)
{
    memset(rl->write_sequence, 0, sizeof(rl->write_sequence));
}

/*
 * Number of buffered application data bytes immediately available to read.
 * Returns 0 if we are mid-record, or if any buffered record is not
 * application data (we would have to handle that record first).
 */
int ssl3_pending(const SSL *s)
{
    unsigned int i;
    int num = 0;

    if (s->rlayer.rstate == SSL_ST_READ_BODY)
        return 0;

    for (i = 0; i < RECORD_LAYER_get_numrpipes(&s->rlayer); i++) {
        if (SSL3_RECORD_get_type(&s->rlayer.rrec[i])
            != SSL3_RT_APPLICATION_DATA)
            return 0;
        num += SSL3_RECORD_get_length(&s->rlayer.rrec[i]);
    }

    return num;
}

/* Set the default read buffer length for all SSL objects created from ctx. */
void SSL_CTX_set_default_read_buffer_len(SSL_CTX *ctx, size_t len)
{
    ctx->default_read_buf_len = len;
}

/* Set the default read buffer length for this SSL object. */
void SSL_set_default_read_buffer_len(SSL *s, size_t len)
{
    SSL3_BUFFER_set_default_len(RECORD_LAYER_get_rbuf(&s->rlayer), len);
}

/* Human-readable name of the current record-layer read state. */
const char *SSL_rstate_string_long(const SSL *s)
{
    switch (s->rlayer.rstate) {
    case SSL_ST_READ_HEADER:
        return "read header";
    case SSL_ST_READ_BODY:
        return "read body";
    case SSL_ST_READ_DONE:
        return "read done";
    default:
        return "unknown";
    }
}

/* Two-letter abbreviation of the current record-layer read state. */
const char *SSL_rstate_string(const SSL *s)
{
    switch (s->rlayer.rstate) {
    case SSL_ST_READ_HEADER:
        return "RH";
    case SSL_ST_READ_BODY:
        return "RB";
    case SSL_ST_READ_DONE:
        return "RD";
    default:
        return "unknown";
    }
}

/*
 * Read up to |n| (or, with read_ahead, up to |max|) bytes from the
 * underlying BIO into the record-layer read buffer.
 * Return values are as per SSL_read(): >0 bytes appended to the current
 * packet, 0/negative on EOF/error (s->rwstate indicates retryability).
 */
int ssl3_read_n(SSL *s, int n, int max, int extend, int clearold)
{
    /*
     * If extend == 0, obtain new n-byte packet; if extend == 1, increase
     * packet by another n bytes. The packet will be in the sub-array of
     * s->s3->rbuf.buf specified by s->packet and s->packet_length. (If
     * s->rlayer.read_ahead is set, 'max' bytes may be stored in rbuf [plus
     * s->packet_length bytes if extend == 1].)
     * if clearold == 1, move the packet to the start of the buffer; if
     * clearold == 0 then leave any old packets where they were
     */
    int i, len, left;
    size_t align = 0;
    unsigned char *pkt;
    SSL3_BUFFER *rb;

    if (n <= 0)
        return n;

    rb = &s->rlayer.rbuf;
    if (rb->buf == NULL)
        if (!ssl3_setup_read_buffer(s))
            return -1;

    left = rb->left;
#if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0
    /* choose an offset so that the record payload ends up aligned */
    align = (size_t)rb->buf + SSL3_RT_HEADER_LENGTH;
    align = SSL3_ALIGN_PAYLOAD - 1 - ((align - 1) % SSL3_ALIGN_PAYLOAD);
#endif

    if (!extend) {
        /* start with empty packet ... */
        if (left == 0)
            rb->offset = align;
        else if (align != 0 && left >= SSL3_RT_HEADER_LENGTH) {
            /*
             * check if next packet length is large enough to justify payload
             * alignment...
             */
            pkt = rb->buf + rb->offset;
            if (pkt[0] == SSL3_RT_APPLICATION_DATA
                && (pkt[3] << 8 | pkt[4]) >= 128) {
                /*
                 * Note that even if packet is corrupted and its length field
                 * is insane, we can only be led to wrong decision about
                 * whether memmove will occur or not. Header values has no
                 * effect on memmove arguments and therefore no buffer
                 * overrun can be triggered.
                 */
                memmove(rb->buf + align, pkt, left);
                rb->offset = align;
            }
        }
        s->rlayer.packet = rb->buf + rb->offset;
        s->rlayer.packet_length = 0;
        /* ... now we can act as if 'extend' was set */
    }

    len = s->rlayer.packet_length;
    pkt = rb->buf + align;
    /*
     * Move any available bytes to front of buffer: 'len' bytes already
     * pointed to by 'packet', 'left' extra ones at the end
     */
    if (s->rlayer.packet != pkt && clearold == 1) {
        memmove(pkt, s->rlayer.packet, len + left);
        s->rlayer.packet = pkt;
        rb->offset = len + align;
    }

    /*
     * For DTLS/UDP reads should not span multiple packets because the read
     * operation returns the whole packet at once (as long as it fits into
     * the buffer).
     */
    if (SSL_IS_DTLS(s)) {
        if (left == 0 && extend)
            return 0;
        if (left > 0 && n > left)
            n = left;
    }

    /* if there is enough in the buffer from a previous read, take some */
    if (left >= n) {
        s->rlayer.packet_length += n;
        rb->left = left - n;
        rb->offset += n;
        return (n);
    }

    /* else we need to read more data */

    if (n > (int)(rb->len - rb->offset)) { /* does not happen */
        SSLerr(SSL_F_SSL3_READ_N, ERR_R_INTERNAL_ERROR);
        return -1;
    }

    /* We always act like read_ahead is set for DTLS */
    if (!s->rlayer.read_ahead && !SSL_IS_DTLS(s))
        /* ignore max parameter */
        max = n;
    else {
        if (max < n)
            max = n;
        if (max > (int)(rb->len - rb->offset))
            max = rb->len - rb->offset;
    }

    while (left < n) {
        /*
         * Now we have len+left bytes at the front of s->s3->rbuf.buf and
         * need to read in more until we have len+n (up to len+max if
         * possible)
         */

        clear_sys_error();
        if (s->rbio != NULL) {
            s->rwstate = SSL_READING;
            i = BIO_read(s->rbio, pkt + len + left, max - left);
        } else {
            SSLerr(SSL_F_SSL3_READ_N, SSL_R_READ_BIO_NOT_SET);
            i = -1;
        }

        if (i <= 0) {
            rb->left = left;
            if (s->mode & SSL_MODE_RELEASE_BUFFERS && !SSL_IS_DTLS(s))
                if (len + left == 0)
                    ssl3_release_read_buffer(s);
            return i;
        }
        left += i;
        /*
         * reads should *never* span multiple packets for DTLS because the
         * underlying transport protocol is message oriented as opposed to
         * byte oriented as in the TLS case.
         */
        if (SSL_IS_DTLS(s)) {
            if (n > left)
                n = left;       /* makes the while condition false */
        }
    }

    /* done reading, now the book-keeping */
    rb->offset += n;
    rb->left = left - n;
    s->rlayer.packet_length += n;
    s->rwstate = SSL_NOTHING;
    return (n);
}

/*
 * Call this to write data in records of type 'type' It will return <= 0 if
 * not all data has been sent or non-blocking IO.
*/ int ssl3_write_bytes(SSL *s, int type, const void *buf_, int len) { const unsigned char *buf = buf_; int tot; unsigned int n, split_send_fragment, maxpipes; #if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK unsigned int max_send_fragment, nw; unsigned int u_len = (unsigned int)len; #endif SSL3_BUFFER *wb = &s->rlayer.wbuf[0]; int i; if (len < 0) { SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_SSL_NEGATIVE_LENGTH); return -1; } s->rwstate = SSL_NOTHING; tot = s->rlayer.wnum; /* * ensure that if we end up with a smaller value of data to write out * than the the original len from a write which didn't complete for * non-blocking I/O and also somehow ended up avoiding the check for * this in ssl3_write_pending/SSL_R_BAD_WRITE_RETRY as it must never be * possible to end up with (len-tot) as a large number that will then * promptly send beyond the end of the users buffer ... so we trap and * report the error in a way the user will notice */ if ((unsigned int)len < s->rlayer.wnum) { SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_BAD_LENGTH); return -1; } s->rlayer.wnum = 0; if (SSL_in_init(s) && !ossl_statem_get_in_handshake(s)) { i = s->handshake_func(s); if (i < 0) return (i); if (i == 0) { SSLerr(SSL_F_SSL3_WRITE_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE); return -1; } } /* * first check if there is a SSL3_BUFFER still being written out. This * will happen with non blocking IO */ if (wb->left != 0) { i = ssl3_write_pending(s, type, &buf[tot], s->rlayer.wpend_tot); if (i <= 0) { /* XXX should we ssl3_release_write_buffer if i<0? */ s->rlayer.wnum = tot; return i; } tot += i; /* this might be last fragment */ } #if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK /* * Depending on platform multi-block can deliver several *times* * better performance. Downside is that it has to allocate * jumbo buffer to accommodate up to 8 records, but the * compromise is considered worthy. 
*/ if (type == SSL3_RT_APPLICATION_DATA && u_len >= 4 * (max_send_fragment = s->max_send_fragment) && s->compress == NULL && s->msg_callback == NULL && !SSL_USE_ETM(s) && SSL_USE_EXPLICIT_IV(s) && EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_write_ctx)) & EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK) { unsigned char aad[13]; EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM mb_param; int packlen; /* minimize address aliasing conflicts */ if ((max_send_fragment & 0xfff) == 0) max_send_fragment -= 512; if (tot == 0 || wb->buf == NULL) { /* allocate jumbo buffer */ ssl3_release_write_buffer(s); packlen = EVP_CIPHER_CTX_ctrl(s->enc_write_ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE, max_send_fragment, NULL); if (u_len >= 8 * max_send_fragment) packlen *= 8; else packlen *= 4; if (!ssl3_setup_write_buffer(s, 1, packlen)) { SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_MALLOC_FAILURE); return -1; } } else if (tot == len) { /* done? */ /* free jumbo buffer */ ssl3_release_write_buffer(s); return tot; } n = (len - tot); for (;;) { if (n < 4 * max_send_fragment) { /* free jumbo buffer */ ssl3_release_write_buffer(s); break; } if (s->s3->alert_dispatch) { i = s->method->ssl_dispatch_alert(s); if (i <= 0) { s->rlayer.wnum = tot; return i; } } if (n >= 8 * max_send_fragment) nw = max_send_fragment * (mb_param.interleave = 8); else nw = max_send_fragment * (mb_param.interleave = 4); memcpy(aad, s->rlayer.write_sequence, 8); aad[8] = type; aad[9] = (unsigned char)(s->version >> 8); aad[10] = (unsigned char)(s->version); aad[11] = 0; aad[12] = 0; mb_param.out = NULL; mb_param.inp = aad; mb_param.len = nw; packlen = EVP_CIPHER_CTX_ctrl(s->enc_write_ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_AAD, sizeof(mb_param), &mb_param); if (packlen <= 0 || packlen > (int)wb->len) { /* never happens */ /* free jumbo buffer */ ssl3_release_write_buffer(s); break; } mb_param.out = wb->buf; mb_param.inp = &buf[tot]; mb_param.len = nw; if (EVP_CIPHER_CTX_ctrl(s->enc_write_ctx, EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT, sizeof(mb_param), &mb_param) <= 0) 
return -1; s->rlayer.write_sequence[7] += mb_param.interleave; if (s->rlayer.write_sequence[7] < mb_param.interleave) { int j = 6; while (j >= 0 && (++s->rlayer.write_sequence[j--]) == 0) ; } wb->offset = 0; wb->left = packlen; s->rlayer.wpend_tot = nw; s->rlayer.wpend_buf = &buf[tot]; s->rlayer.wpend_type = type; s->rlayer.wpend_ret = nw; i = ssl3_write_pending(s, type, &buf[tot], nw); if (i <= 0) { if (i < 0 && (!s->wbio || !BIO_should_retry(s->wbio))) { /* free jumbo buffer */ ssl3_release_write_buffer(s); } s->rlayer.wnum = tot; return i; } if (i == (int)n) { /* free jumbo buffer */ ssl3_release_write_buffer(s); return tot + i; } n -= i; tot += i; } } else #endif if (tot == len) { /* done? */ if (s->mode & SSL_MODE_RELEASE_BUFFERS && !SSL_IS_DTLS(s)) ssl3_release_write_buffer(s); return tot; } n = (len - tot); split_send_fragment = s->split_send_fragment; /* * If max_pipelines is 0 then this means "undefined" and we default to * 1 pipeline. Similarly if the cipher does not support pipelined * processing then we also only use 1 pipeline, or if we're not using * explicit IVs */ maxpipes = s->max_pipelines; if (maxpipes > SSL_MAX_PIPELINES) { /* * We should have prevented this when we set max_pipelines so we * shouldn't get here */ SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_INTERNAL_ERROR); return -1; } if (maxpipes == 0 || s->enc_write_ctx == NULL || !(EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_write_ctx)) & EVP_CIPH_FLAG_PIPELINE) || !SSL_USE_EXPLICIT_IV(s)) maxpipes = 1; if (s->max_send_fragment == 0 || split_send_fragment > s->max_send_fragment || split_send_fragment == 0) { /* * We should have prevented this when we set the split and max send * fragments so we shouldn't get here */ SSLerr(SSL_F_SSL3_WRITE_BYTES, ERR_R_INTERNAL_ERROR); return -1; } for (;;) { unsigned int pipelens[SSL_MAX_PIPELINES], tmppipelen, remain; unsigned int numpipes, j; if (n == 0) numpipes = 1; else numpipes = ((n - 1) / split_send_fragment) + 1; if (numpipes > maxpipes) numpipes = 
maxpipes; if (n / numpipes >= s->max_send_fragment) { /* * We have enough data to completely fill all available * pipelines */ for (j = 0; j < numpipes; j++) { pipelens[j] = s->max_send_fragment; } } else { /* We can partially fill all available pipelines */ tmppipelen = n / numpipes; remain = n % numpipes; for (j = 0; j < numpipes; j++) { pipelens[j] = tmppipelen; if (j < remain) pipelens[j]++; } } i = do_ssl3_write(s, type, &(buf[tot]), pipelens, numpipes, 0); if (i <= 0) { /* XXX should we ssl3_release_write_buffer if i<0? */ s->rlayer.wnum = tot; return i; } if ((i == (int)n) || (type == SSL3_RT_APPLICATION_DATA && (s->mode & SSL_MODE_ENABLE_PARTIAL_WRITE))) { /* * next chunk of data should get another prepended empty fragment * in ciphersuites with known-IV weakness: */ s->s3->empty_fragment_done = 0; if ((i == (int)n) && s->mode & SSL_MODE_RELEASE_BUFFERS && !SSL_IS_DTLS(s)) ssl3_release_write_buffer(s); return tot + i; } n -= i; tot += i; } } int do_ssl3_write(SSL *s, int type, const unsigned char *buf, unsigned int *pipelens, unsigned int numpipes, int create_empty_fragment) { unsigned char *outbuf[SSL_MAX_PIPELINES], *plen[SSL_MAX_PIPELINES]; SSL3_RECORD wr[SSL_MAX_PIPELINES]; int i, mac_size, clear = 0; int prefix_len = 0; int eivlen; size_t align = 0; SSL3_BUFFER *wb; SSL_SESSION *sess; unsigned int totlen = 0; unsigned int j; for (j = 0; j < numpipes; j++) totlen += pipelens[j]; /* * first check if there is a SSL3_BUFFER still being written out. 
This * will happen with non blocking IO */ if (RECORD_LAYER_write_pending(&s->rlayer)) return (ssl3_write_pending(s, type, buf, totlen)); /* If we have an alert to send, lets send it */ if (s->s3->alert_dispatch) { i = s->method->ssl_dispatch_alert(s); if (i <= 0) return (i); /* if it went, fall through and send more stuff */ } if (s->rlayer.numwpipes < numpipes) if (!ssl3_setup_write_buffer(s, numpipes, 0)) return -1; if (totlen == 0 && !create_empty_fragment) return 0; sess = s->session; if ((sess == NULL) || (s->enc_write_ctx == NULL) || (EVP_MD_CTX_md(s->write_hash) == NULL)) { clear = s->enc_write_ctx ? 0 : 1; /* must be AEAD cipher */ mac_size = 0; } else { mac_size = EVP_MD_CTX_size(s->write_hash); if (mac_size < 0) goto err; } /* * 'create_empty_fragment' is true only when this function calls itself */ if (!clear && !create_empty_fragment && !s->s3->empty_fragment_done) { /* * countermeasure against known-IV weakness in CBC ciphersuites (see * http://www.openssl.org/~bodo/tls-cbc.txt) */ if (s->s3->need_empty_fragments && type == SSL3_RT_APPLICATION_DATA) { /* * recursive function call with 'create_empty_fragment' set; this * prepares and buffers the data for an empty fragment (these * 'prefix_len' bytes are sent out later together with the actual * payload) */ unsigned int tmppipelen = 0; prefix_len = do_ssl3_write(s, type, buf, &tmppipelen, 1, 1); if (prefix_len <= 0) goto err; if (prefix_len > (SSL3_RT_HEADER_LENGTH + SSL3_RT_SEND_MAX_ENCRYPTED_OVERHEAD)) { /* insufficient space */ SSLerr(SSL_F_DO_SSL3_WRITE, ERR_R_INTERNAL_ERROR); goto err; } } s->s3->empty_fragment_done = 1; } if (create_empty_fragment) { wb = &s->rlayer.wbuf[0]; #if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0 /* * extra fragment would be couple of cipher blocks, which would be * multiple of SSL3_ALIGN_PAYLOAD, so if we want to align the real * payload, then we can just pretend we simply have two headers. 
*/ align = (size_t)SSL3_BUFFER_get_buf(wb) + 2 * SSL3_RT_HEADER_LENGTH; align = SSL3_ALIGN_PAYLOAD - 1 - ((align - 1) % SSL3_ALIGN_PAYLOAD); #endif outbuf[0] = SSL3_BUFFER_get_buf(wb) + align; SSL3_BUFFER_set_offset(wb, align); } else if (prefix_len) { wb = &s->rlayer.wbuf[0]; outbuf[0] = SSL3_BUFFER_get_buf(wb) + SSL3_BUFFER_get_offset(wb) + prefix_len; } else { for (j = 0; j < numpipes; j++) { wb = &s->rlayer.wbuf[j]; #if defined(SSL3_ALIGN_PAYLOAD) && SSL3_ALIGN_PAYLOAD!=0 align = (size_t)SSL3_BUFFER_get_buf(wb) + SSL3_RT_HEADER_LENGTH; align = SSL3_ALIGN_PAYLOAD - 1 - ((align - 1) % SSL3_ALIGN_PAYLOAD); #endif outbuf[j] = SSL3_BUFFER_get_buf(wb) + align; SSL3_BUFFER_set_offset(wb, align); } } /* Explicit IV length, block ciphers appropriate version flag */ if (s->enc_write_ctx && SSL_USE_EXPLICIT_IV(s)) { int mode = EVP_CIPHER_CTX_mode(s->enc_write_ctx); if (mode == EVP_CIPH_CBC_MODE) { eivlen = EVP_CIPHER_CTX_iv_length(s->enc_write_ctx); if (eivlen <= 1) eivlen = 0; } /* Need explicit part of IV for GCM mode */ else if (mode == EVP_CIPH_GCM_MODE) eivlen = EVP_GCM_TLS_EXPLICIT_IV_LEN; else if (mode == EVP_CIPH_CCM_MODE) eivlen = EVP_CCM_TLS_EXPLICIT_IV_LEN; else eivlen = 0; } else eivlen = 0; totlen = 0; /* Clear our SSL3_RECORD structures */ memset(wr, 0, sizeof wr); for (j = 0; j < numpipes; j++) { /* write the header */ *(outbuf[j]++) = type & 0xff; SSL3_RECORD_set_type(&wr[j], type); *(outbuf[j]++) = (s->version >> 8); /* * Some servers hang if initial client hello is larger than 256 bytes * and record version number > TLS 1.0 */ if (SSL_get_state(s) == TLS_ST_CW_CLNT_HELLO && !s->renegotiate && TLS1_get_version(s) > TLS1_VERSION) *(outbuf[j]++) = 0x1; else *(outbuf[j]++) = s->version & 0xff; /* field where we are to write out packet length */ plen[j] = outbuf[j]; outbuf[j] += 2; /* lets setup the record stuff. 
*/ SSL3_RECORD_set_data(&wr[j], outbuf[j] + eivlen); SSL3_RECORD_set_length(&wr[j], (int)pipelens[j]); SSL3_RECORD_set_input(&wr[j], (unsigned char *)&buf[totlen]); totlen += pipelens[j]; /* * we now 'read' from wr->input, wr->length bytes into wr->data */ /* first we compress */ if (s->compress != NULL) { if (!ssl3_do_compress(s, &wr[j])) { SSLerr(SSL_F_DO_SSL3_WRITE, SSL_R_COMPRESSION_FAILURE); goto err; } } else { memcpy(wr[j].data, wr[j].input, wr[j].length); SSL3_RECORD_reset_input(&wr[j]); } /* * we should still have the output to wr->data and the input from * wr->input. Length should be wr->length. wr->data still points in the * wb->buf */ if (!SSL_USE_ETM(s) && mac_size != 0) { if (s->method->ssl3_enc->mac(s, &wr[j], &(outbuf[j][wr[j].length + eivlen]), 1) < 0) goto err; SSL3_RECORD_add_length(&wr[j], mac_size); } SSL3_RECORD_set_data(&wr[j], outbuf[j]); SSL3_RECORD_reset_input(&wr[j]); if (eivlen) { /* * if (RAND_pseudo_bytes(p, eivlen) <= 0) goto err; */ SSL3_RECORD_add_length(&wr[j], eivlen); } } if (s->method->ssl3_enc->enc(s, wr, numpipes, 1) < 1) goto err; for (j = 0; j < numpipes; j++) { if (SSL_USE_ETM(s) && mac_size != 0) { if (s->method->ssl3_enc->mac(s, &wr[j], outbuf[j] + wr[j].length, 1) < 0) goto err; SSL3_RECORD_add_length(&wr[j], mac_size); } /* record length after mac and block padding */ s2n(SSL3_RECORD_get_length(&wr[j]), plen[j]); if (s->msg_callback) s->msg_callback(1, 0, SSL3_RT_HEADER, plen[j] - 5, 5, s, s->msg_callback_arg); /* * we should now have wr->data pointing to the encrypted data, which is * wr->length long */ SSL3_RECORD_set_type(&wr[j], type); /* not needed but helps for * debugging */ SSL3_RECORD_add_length(&wr[j], SSL3_RT_HEADER_LENGTH); if (create_empty_fragment) { /* * we are in a recursive call; just return the length, don't write * out anything here */ if (j > 0) { /* We should never be pipelining an empty fragment!! 
*/ SSLerr(SSL_F_DO_SSL3_WRITE, ERR_R_INTERNAL_ERROR); goto err; } return SSL3_RECORD_get_length(wr); } /* now let's set up wb */ SSL3_BUFFER_set_left(&s->rlayer.wbuf[j], prefix_len + SSL3_RECORD_get_length(&wr[j])); } /* * memorize arguments so that ssl3_write_pending can detect bad write * retries later */ s->rlayer.wpend_tot = totlen; s->rlayer.wpend_buf = buf; s->rlayer.wpend_type = type; s->rlayer.wpend_ret = totlen; /* we now just need to write the buffer */ return ssl3_write_pending(s, type, buf, totlen); err: return -1; } /* if s->s3->wbuf.left != 0, we need to call this * * Return values are as per SSL_write() */ int ssl3_write_pending(SSL *s, int type, const unsigned char *buf, unsigned int len) { int i; SSL3_BUFFER *wb = s->rlayer.wbuf; unsigned int currbuf = 0; /* XXXX */ if ((s->rlayer.wpend_tot > (int)len) || ((s->rlayer.wpend_buf != buf) && !(s->mode & SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER)) || (s->rlayer.wpend_type != type)) { SSLerr(SSL_F_SSL3_WRITE_PENDING, SSL_R_BAD_WRITE_RETRY); return (-1); } for (;;) { /* Loop until we find a buffer we haven't written out yet */ if (SSL3_BUFFER_get_left(&wb[currbuf]) == 0 && currbuf < s->rlayer.numwpipes - 1) { currbuf++; continue; } clear_sys_error(); if (s->wbio != NULL) { s->rwstate = SSL_WRITING; i = BIO_write(s->wbio, (char *) &(SSL3_BUFFER_get_buf(&wb[currbuf]) [SSL3_BUFFER_get_offset(&wb[currbuf])]), (unsigned int)SSL3_BUFFER_get_left(&wb[currbuf])); } else { SSLerr(SSL_F_SSL3_WRITE_PENDING, SSL_R_BIO_NOT_SET); i = -1; } if (i == SSL3_BUFFER_get_left(&wb[currbuf])) { SSL3_BUFFER_set_left(&wb[currbuf], 0); SSL3_BUFFER_add_offset(&wb[currbuf], i); if (currbuf + 1 < s->rlayer.numwpipes) continue; s->rwstate = SSL_NOTHING; return (s->rlayer.wpend_ret); } else if (i <= 0) { if (SSL_IS_DTLS(s)) { /* * For DTLS, just drop it. 
That's kind of the whole point in * using a datagram service */ SSL3_BUFFER_set_left(&wb[currbuf], 0); } return i; } SSL3_BUFFER_add_offset(&wb[currbuf], i); SSL3_BUFFER_add_left(&wb[currbuf], -i); } } /*- * Return up to 'len' payload bytes received in 'type' records. * 'type' is one of the following: * * - SSL3_RT_HANDSHAKE (when ssl3_get_message calls us) * - SSL3_RT_APPLICATION_DATA (when ssl3_read calls us) * - 0 (during a shutdown, no data has to be returned) * * If we don't have stored data to work from, read a SSL/TLS record first * (possibly multiple records if we still don't have anything to return). * * This function must handle any surprises the peer may have for us, such as * Alert records (e.g. close_notify) or renegotiation requests. ChangeCipherSpec * messages are treated as if they were handshake messages *if* the |recd_type| * argument is non NULL. * Also if record payloads contain fragments too small to process, we store * them until there is enough for the respective protocol (the record protocol * may use arbitrary fragmentation and even interleaving): * Change cipher spec protocol * just 1 byte needed, no need for keeping anything stored * Alert protocol * 2 bytes needed (AlertLevel, AlertDescription) * Handshake protocol * 4 bytes needed (HandshakeType, uint24 length) -- we just have * to detect unexpected Client Hello and Hello Request messages * here, anything else is handled by higher layers * Application data protocol * none of our business */ int ssl3_read_bytes(SSL *s, int type, int *recvd_type, unsigned char *buf, int len, int peek) { int al, i, j, ret; unsigned int n, curr_rec, num_recs, read_bytes; SSL3_RECORD *rr; SSL3_BUFFER *rbuf; void (*cb) (const SSL *ssl, int type2, int val) = NULL; rbuf = &s->rlayer.rbuf; if (!SSL3_BUFFER_is_initialised(rbuf)) { /* Not initialized yet */ if (!ssl3_setup_read_buffer(s)) return (-1); } if ((type && (type != SSL3_RT_APPLICATION_DATA) && (type != SSL3_RT_HANDSHAKE)) || (peek && (type != 
SSL3_RT_APPLICATION_DATA))) { SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR); return -1; } if ((type == SSL3_RT_HANDSHAKE) && (s->rlayer.handshake_fragment_len > 0)) /* (partially) satisfy request from storage */ { unsigned char *src = s->rlayer.handshake_fragment; unsigned char *dst = buf; unsigned int k; /* peek == 0 */ n = 0; while ((len > 0) && (s->rlayer.handshake_fragment_len > 0)) { *dst++ = *src++; len--; s->rlayer.handshake_fragment_len--; n++; } /* move any remaining fragment bytes: */ for (k = 0; k < s->rlayer.handshake_fragment_len; k++) s->rlayer.handshake_fragment[k] = *src++; if (recvd_type != NULL) *recvd_type = SSL3_RT_HANDSHAKE; return n; } /* * Now s->rlayer.handshake_fragment_len == 0 if type == SSL3_RT_HANDSHAKE. */ if (!ossl_statem_get_in_handshake(s) && SSL_in_init(s)) { /* type == SSL3_RT_APPLICATION_DATA */ i = s->handshake_func(s); if (i < 0) return (i); if (i == 0) { SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE); return (-1); } } start: s->rwstate = SSL_NOTHING; /*- * For each record 'i' up to |num_recs] * rr[i].type - is the type of record * rr[i].data, - data * rr[i].off, - offset into 'data' for next read * rr[i].length, - number of bytes. */ rr = s->rlayer.rrec; num_recs = RECORD_LAYER_get_numrpipes(&s->rlayer); do { /* get new records if necessary */ if (num_recs == 0) { ret = ssl3_get_record(s); if (ret <= 0) return (ret); num_recs = RECORD_LAYER_get_numrpipes(&s->rlayer); if (num_recs == 0) { /* Shouldn't happen */ al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR); goto f_err; } } /* Skip over any records we have already read */ for (curr_rec = 0; curr_rec < num_recs && SSL3_RECORD_is_read(&rr[curr_rec]); curr_rec++) ; if (curr_rec == num_recs) { RECORD_LAYER_set_numrpipes(&s->rlayer, 0); num_recs = 0; curr_rec = 0; } } while (num_recs == 0); rr = &rr[curr_rec]; /* * Reset the count of consecutive warning alerts if we've got a non-empty * record that isn't an alert. 
*/ if (SSL3_RECORD_get_type(rr) != SSL3_RT_ALERT && SSL3_RECORD_get_length(rr) != 0) s->rlayer.alert_count = 0; /* we now have a packet which can be read and processed */ if (s->s3->change_cipher_spec /* set when we receive ChangeCipherSpec, * reset by ssl3_get_finished */ && (SSL3_RECORD_get_type(rr) != SSL3_RT_HANDSHAKE)) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_DATA_BETWEEN_CCS_AND_FINISHED); goto f_err; } /* * If the other end has shut down, throw anything we read away (even in * 'peek' mode) */ if (s->shutdown & SSL_RECEIVED_SHUTDOWN) { SSL3_RECORD_set_length(rr, 0); s->rwstate = SSL_NOTHING; return (0); } if (type == SSL3_RECORD_get_type(rr) || (SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC && type == SSL3_RT_HANDSHAKE && recvd_type != NULL)) { /* * SSL3_RT_APPLICATION_DATA or * SSL3_RT_HANDSHAKE or * SSL3_RT_CHANGE_CIPHER_SPEC */ /* * make sure that we are not getting application data when we are * doing a handshake for the first time */ if (SSL_in_init(s) && (type == SSL3_RT_APPLICATION_DATA) && (s->enc_read_ctx == NULL)) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_APP_DATA_IN_HANDSHAKE); goto f_err; } if (type == SSL3_RT_HANDSHAKE && SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC && s->rlayer.handshake_fragment_len > 0) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_CCS_RECEIVED_EARLY); goto f_err; } if (recvd_type != NULL) *recvd_type = SSL3_RECORD_get_type(rr); if (len <= 0) return (len); read_bytes = 0; do { if ((unsigned int)len - read_bytes > SSL3_RECORD_get_length(rr)) n = SSL3_RECORD_get_length(rr); else n = (unsigned int)len - read_bytes; memcpy(buf, &(rr->data[rr->off]), n); buf += n; if (peek) { /* Mark any zero length record as consumed CVE-2016-6305 */ if (SSL3_RECORD_get_length(rr) == 0) SSL3_RECORD_set_read(rr); } else { SSL3_RECORD_sub_length(rr, n); SSL3_RECORD_add_off(rr, n); if (SSL3_RECORD_get_length(rr) == 0) { s->rlayer.rstate = 
SSL_ST_READ_HEADER; SSL3_RECORD_set_off(rr, 0); SSL3_RECORD_set_read(rr); } } if (SSL3_RECORD_get_length(rr) == 0 || (peek && n == SSL3_RECORD_get_length(rr))) { curr_rec++; rr++; } read_bytes += n; } while (type == SSL3_RT_APPLICATION_DATA && curr_rec < num_recs && read_bytes < (unsigned int)len); if (read_bytes == 0) { /* We must have read empty records. Get more data */ goto start; } if (!peek && curr_rec == num_recs && (s->mode & SSL_MODE_RELEASE_BUFFERS) && SSL3_BUFFER_get_left(rbuf) == 0) ssl3_release_read_buffer(s); return read_bytes; } /* * If we get here, then type != rr->type; if we have a handshake message, * then it was unexpected (Hello Request or Client Hello) or invalid (we * were actually expecting a CCS). */ /* * Lets just double check that we've not got an SSLv2 record */ if (rr->rec_version == SSL2_VERSION) { /* * Should never happen. ssl3_get_record() should only give us an SSLv2 * record back if this is the first packet and we are looking for an * initial ClientHello. Therefore |type| should always be equal to * |rr->type|. If not then something has gone horribly wrong */ al = SSL_AD_INTERNAL_ERROR; SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR); goto f_err; } if (s->method->version == TLS_ANY_VERSION && (s->server || rr->type != SSL3_RT_ALERT)) { /* * If we've got this far and still haven't decided on what version * we're using then this must be a client side alert we're dealing with * (we don't allow heartbeats yet). We shouldn't be receiving anything * other than a ClientHello if we are a server. */ s->version = rr->rec_version; al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_MESSAGE); goto f_err; } /* * In case of record types for which we have 'fragment' storage, fill * that so that we can process the data at a fixed place. 
*/ { unsigned int dest_maxlen = 0; unsigned char *dest = NULL; unsigned int *dest_len = NULL; if (SSL3_RECORD_get_type(rr) == SSL3_RT_HANDSHAKE) { dest_maxlen = sizeof s->rlayer.handshake_fragment; dest = s->rlayer.handshake_fragment; dest_len = &s->rlayer.handshake_fragment_len; } else if (SSL3_RECORD_get_type(rr) == SSL3_RT_ALERT) { dest_maxlen = sizeof s->rlayer.alert_fragment; dest = s->rlayer.alert_fragment; dest_len = &s->rlayer.alert_fragment_len; } if (dest_maxlen > 0) { n = dest_maxlen - *dest_len; /* available space in 'dest' */ if (SSL3_RECORD_get_length(rr) < n) n = SSL3_RECORD_get_length(rr); /* available bytes */ /* now move 'n' bytes: */ while (n-- > 0) { dest[(*dest_len)++] = SSL3_RECORD_get_data(rr)[SSL3_RECORD_get_off(rr)]; SSL3_RECORD_add_off(rr, 1); SSL3_RECORD_add_length(rr, -1); } if (*dest_len < dest_maxlen) { SSL3_RECORD_set_read(rr); goto start; /* fragment was too small */ } } } /*- * s->rlayer.handshake_fragment_len == 4 iff rr->type == SSL3_RT_HANDSHAKE; * s->rlayer.alert_fragment_len == 2 iff rr->type == SSL3_RT_ALERT. * (Possibly rr is 'empty' now, i.e. rr->length may be 0.) 
*/ /* If we are a client, check for an incoming 'Hello Request': */ if ((!s->server) && (s->rlayer.handshake_fragment_len >= 4) && (s->rlayer.handshake_fragment[0] == SSL3_MT_HELLO_REQUEST) && (s->session != NULL) && (s->session->cipher != NULL)) { s->rlayer.handshake_fragment_len = 0; if ((s->rlayer.handshake_fragment[1] != 0) || (s->rlayer.handshake_fragment[2] != 0) || (s->rlayer.handshake_fragment[3] != 0)) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_BAD_HELLO_REQUEST); goto f_err; } if (s->msg_callback) s->msg_callback(0, s->version, SSL3_RT_HANDSHAKE, s->rlayer.handshake_fragment, 4, s, s->msg_callback_arg); if (SSL_is_init_finished(s) && !(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS) && !s->s3->renegotiate) { ssl3_renegotiate(s); if (ssl3_renegotiate_check(s)) { i = s->handshake_func(s); if (i < 0) return (i); if (i == 0) { SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE); return (-1); } if (!(s->mode & SSL_MODE_AUTO_RETRY)) { if (SSL3_BUFFER_get_left(rbuf) == 0) { /* no read-ahead left? */ BIO *bio; /* * In the case where we try to read application data, * but we trigger an SSL handshake, we return -1 with * the retry option set. Otherwise renegotiation may * cause nasty problems in the blocking world */ s->rwstate = SSL_READING; bio = SSL_get_rbio(s); BIO_clear_retry_flags(bio); BIO_set_retry_read(bio); return (-1); } } } else { SSL3_RECORD_set_read(rr); } } else { /* Does this ever happen? */ SSL3_RECORD_set_read(rr); } /* * we either finished a handshake or ignored the request, now try * again to obtain the (application) data we were asked for */ goto start; } /* * If we are a server and get a client hello when renegotiation isn't * allowed send back a no renegotiation alert and carry on. 
WARNING: * experimental code, needs reviewing (steve) */ if (s->server && SSL_is_init_finished(s) && !s->s3->send_connection_binding && (s->version > SSL3_VERSION) && (s->rlayer.handshake_fragment_len >= 4) && (s->rlayer.handshake_fragment[0] == SSL3_MT_CLIENT_HELLO) && (s->session != NULL) && (s->session->cipher != NULL) && !(s->ctx->options & SSL_OP_ALLOW_UNSAFE_LEGACY_RENEGOTIATION)) { SSL3_RECORD_set_length(rr, 0); SSL3_RECORD_set_read(rr); ssl3_send_alert(s, SSL3_AL_WARNING, SSL_AD_NO_RENEGOTIATION); goto start; } if (s->rlayer.alert_fragment_len >= 2) { int alert_level = s->rlayer.alert_fragment[0]; int alert_descr = s->rlayer.alert_fragment[1]; s->rlayer.alert_fragment_len = 0; if (s->msg_callback) s->msg_callback(0, s->version, SSL3_RT_ALERT, s->rlayer.alert_fragment, 2, s, s->msg_callback_arg); if (s->info_callback != NULL) cb = s->info_callback; else if (s->ctx->info_callback != NULL) cb = s->ctx->info_callback; if (cb != NULL) { j = (alert_level << 8) | alert_descr; cb(s, SSL_CB_READ_ALERT, j); } if (alert_level == SSL3_AL_WARNING) { s->s3->warn_alert = alert_descr; SSL3_RECORD_set_read(rr); s->rlayer.alert_count++; if (s->rlayer.alert_count == MAX_WARN_ALERT_COUNT) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_TOO_MANY_WARN_ALERTS); goto f_err; } if (alert_descr == SSL_AD_CLOSE_NOTIFY) { s->shutdown |= SSL_RECEIVED_SHUTDOWN; return (0); } /* * This is a warning but we receive it if we requested * renegotiation and the peer denied it. Terminate with a fatal * alert because if application tried to renegotiate it * presumably had a good reason and expects it to succeed. In * future we might have a renegotiation where we don't care if * the peer refused it where we carry on. 
*/ else if (alert_descr == SSL_AD_NO_RENEGOTIATION) { al = SSL_AD_HANDSHAKE_FAILURE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_NO_RENEGOTIATION); goto f_err; } #ifdef SSL_AD_MISSING_SRP_USERNAME else if (alert_descr == SSL_AD_MISSING_SRP_USERNAME) return (0); #endif } else if (alert_level == SSL3_AL_FATAL) { char tmp[16]; s->rwstate = SSL_NOTHING; s->s3->fatal_alert = alert_descr; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_AD_REASON_OFFSET + alert_descr); BIO_snprintf(tmp, sizeof tmp, "%d", alert_descr); ERR_add_error_data(2, "SSL alert number ", tmp); s->shutdown |= SSL_RECEIVED_SHUTDOWN; SSL3_RECORD_set_read(rr); SSL_CTX_remove_session(s->session_ctx, s->session); return (0); } else { al = SSL_AD_ILLEGAL_PARAMETER; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNKNOWN_ALERT_TYPE); goto f_err; } goto start; } if (s->shutdown & SSL_SENT_SHUTDOWN) { /* but we have not received a * shutdown */ s->rwstate = SSL_NOTHING; SSL3_RECORD_set_length(rr, 0); SSL3_RECORD_set_read(rr); return (0); } if (SSL3_RECORD_get_type(rr) == SSL3_RT_CHANGE_CIPHER_SPEC) { al = SSL_AD_UNEXPECTED_MESSAGE; SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_CCS_RECEIVED_EARLY); goto f_err; } /* * Unexpected handshake message (Client Hello, or protocol violation) */ if ((s->rlayer.handshake_fragment_len >= 4) && !ossl_statem_get_in_handshake(s)) { if (SSL_is_init_finished(s) && !(s->s3->flags & SSL3_FLAGS_NO_RENEGOTIATE_CIPHERS)) { ossl_statem_set_in_init(s, 1); s->renegotiate = 1; s->new_session = 1; } i = s->handshake_func(s); if (i < 0) return (i); if (i == 0) { SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_SSL_HANDSHAKE_FAILURE); return (-1); } if (!(s->mode & SSL_MODE_AUTO_RETRY)) { if (SSL3_BUFFER_get_left(rbuf) == 0) { /* no read-ahead left? */ BIO *bio; /* * In the case where we try to read application data, but we * trigger an SSL handshake, we return -1 with the retry * option set. 
Otherwise renegotiation may cause nasty
                 * problems in the blocking world */
                s->rwstate = SSL_READING;
                bio = SSL_get_rbio(s);
                BIO_clear_retry_flags(bio);
                BIO_set_retry_read(bio);
                return (-1);
            }
        }
        goto start;
    }

    /* Dispatch on the record type of the current record. */
    switch (SSL3_RECORD_get_type(rr)) {
    default:
        /*
         * TLS 1.0 and 1.1 say you SHOULD ignore unrecognised record types, but
         * TLS 1.2 says you MUST send an unexpected message alert. We use the
         * TLS 1.2 behaviour for all protocol versions to prevent issues where
         * no progress is being made and the peer continually sends unrecognised
         * record types, using up resources processing them.
         */
        al = SSL_AD_UNEXPECTED_MESSAGE;
        SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_RECORD);
        goto f_err;
    case SSL3_RT_CHANGE_CIPHER_SPEC:
    case SSL3_RT_ALERT:
    case SSL3_RT_HANDSHAKE:
        /*
         * we already handled all of these, with the possible exception of
         * SSL3_RT_HANDSHAKE when ossl_statem_get_in_handshake(s) is true, but
         * that should not happen when type != rr->type
         */
        al = SSL_AD_UNEXPECTED_MESSAGE;
        SSLerr(SSL_F_SSL3_READ_BYTES, ERR_R_INTERNAL_ERROR);
        goto f_err;
    case SSL3_RT_APPLICATION_DATA:
        /*
         * At this point, we were expecting handshake data, but have
         * application data. If the library was running inside ssl3_read()
         * (i.e. in_read_app_data is set) and it makes sense to read
         * application data at this point (session renegotiation not yet
         * started), we will indulge it.
         */
        if (ossl_statem_app_data_allowed(s)) {
            s->s3->in_read_app_data = 2;
            return (-1);
        } else {
            al = SSL_AD_UNEXPECTED_MESSAGE;
            SSLerr(SSL_F_SSL3_READ_BYTES, SSL_R_UNEXPECTED_RECORD);
            goto f_err;
        }
    }
    /* not reached */

 f_err:
    /* Fatal error path: notify the peer, then fail the read. */
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
    return (-1);
}

/*
 * Increment the 8-byte record sequence number stored big-endian in |seq|,
 * in place. The carry is propagated from the least-significant byte
 * (seq[7]) towards seq[0]; propagation stops at the first byte that does
 * not wrap around to zero. If all 8 bytes wrap, the sequence number
 * silently rolls over to all-zero.
 */
void ssl3_record_sequence_update(unsigned char *seq)
{
    int i;

    for (i = 7; i >= 0; i--) {
        ++seq[i];
        if (seq[i] != 0)
            break;              /* no carry out of this byte: done */
    }
}

/*
 * Returns true if the current rrec was sent in SSLv2 backwards compatible
 * format and false otherwise.
 */
int RECORD_LAYER_is_sslv2_record(RECORD_LAYER *rl)
{
    /* Delegates to the first (current) record in the record layer. */
    return SSL3_RECORD_is_sslv2_record(&rl->rrec[0]);
}

/*
 * Returns the length in bytes of the current rrec (the first record held
 * by the record layer).
 */
unsigned int RECORD_LAYER_get_rrec_length(RECORD_LAYER *rl)
{
    return SSL3_RECORD_get_length(&rl->rrec[0]);
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_3070_1
crossvul-cpp_data_good_2035_1
/* * linux/fs/proc/base.c * * Copyright (C) 1991, 1992 Linus Torvalds * * proc base directory handling functions * * 1999, Al Viro. Rewritten. Now it covers the whole per-process part. * Instead of using magical inumbers to determine the kind of object * we allocate and fill in-core inodes upon lookup. They don't even * go into icache. We cache the reference to task_struct upon lookup too. * Eventually it should become a filesystem in its own. We don't use the * rest of procfs anymore. * * * Changelog: * 17-Jan-2005 * Allan Bezerra * Bruna Moreira <bruna.moreira@indt.org.br> * Edjard Mota <edjard.mota@indt.org.br> * Ilias Biris <ilias.biris@indt.org.br> * Mauricio Lin <mauricio.lin@indt.org.br> * * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT * * A new process specific entry (smaps) included in /proc. It shows the * size of rss for each memory area. The maps entry lacks information * about physical memory size (rss) for each mapped file, i.e., * rss information for executables and library files. * This additional information is useful for any tools that need to know * about physical memory consumption for a process specific library. * * Changelog: * 21-Feb-2005 * Embedded Linux Lab - 10LE Instituto Nokia de Tecnologia - INdT * Pud inclusion in the page table walking. * * ChangeLog: * 10-Mar-2005 * 10LE Instituto Nokia de Tecnologia - INdT: * A better way to walks through the page table as suggested by Hugh Dickins. * * Simo Piiroinen <simo.piiroinen@nokia.com>: * Smaps information related to shared, private, clean and dirty pages. * * Paul Mundt <paul.mundt@nokia.com>: * Overall revision about smaps. 
*/ #include <asm/uaccess.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/task_io_accounting_ops.h> #include <linux/init.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/namei.h> #include <linux/mnt_namespace.h> #include <linux/mm.h> #include <linux/rcupdate.h> #include <linux/kallsyms.h> #include <linux/stacktrace.h> #include <linux/resource.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/security.h> #include <linux/ptrace.h> #include <linux/tracehook.h> #include <linux/cgroup.h> #include <linux/cpuset.h> #include <linux/audit.h> #include <linux/poll.h> #include <linux/nsproxy.h> #include <linux/oom.h> #include <linux/elf.h> #include <linux/pid_namespace.h> #include <linux/fs_struct.h> #include "internal.h" /* NOTE: * Implementing inode permission operations in /proc is almost * certainly an error. Permission checks need to happen during * each system call not at open time. The reason is that most of * what we wish to check for permissions in /proc varies at runtime. * * The classic example of a problem is opening file descriptors * in /proc for a task before it execs a suid executable. 
 */

/*
 * One entry in a /proc/<pid>/ directory table: its name, file mode, and
 * the inode/file operations (or per-entry callback) used to service it.
 */
struct pid_entry {
	char *name;				/* entry name as it appears in the directory */
	int len;				/* strlen(name), precomputed by NOD() */
	mode_t mode;				/* file type and permission bits */
	const struct inode_operations *iop;	/* inode ops, or NULL for the default */
	const struct file_operations *fop;	/* file ops, or NULL */
	union proc_op op;			/* per-entry callback (read/show/get_link) */
};

/* Build a pid_entry initializer; .len is derived from the name literal. */
#define NOD(NAME, MODE, IOP, FOP, OP) {			\
	.name = (NAME),					\
	.len  = sizeof(NAME) - 1,			\
	.mode = MODE,					\
	.iop  = IOP,					\
	.fop  = FOP,					\
	.op   = OP,					\
}

/* Directory entry: supplies its own inode and file operations. */
#define DIR(NAME, MODE, iops, fops)	\
	NOD(NAME, (S_IFDIR|(MODE)), &iops, &fops, {} )
/* Symlink entry: resolved at follow time via the proc_get_link callback. */
#define LNK(NAME, get_link)					\
	NOD(NAME, (S_IFLNK|S_IRWXUGO),				\
		&proc_pid_link_inode_operations, NULL,		\
		{ .proc_get_link = get_link } )
/* Regular file with explicit file operations. */
#define REG(NAME, MODE, fops)				\
	NOD(NAME, (S_IFREG|(MODE)), NULL, &fops, {})
/* Regular file backed by a proc_read callback (buffer-filling style). */
#define INF(NAME, MODE, read)				\
	NOD(NAME, (S_IFREG|(MODE)),			\
		NULL, &proc_info_file_operations,	\
		{ .proc_read = read } )
/* Regular file backed by a seq_file single-show callback. */
#define ONE(NAME, MODE, show)				\
	NOD(NAME, (S_IFREG|(MODE)),			\
		NULL, &proc_single_file_operations,	\
		{ .proc_show = show } )

/*
 * Count the number of hardlinks for the pid_entry table, excluding the .
 * and .. links.
 */
static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
	unsigned int n)
{
	unsigned int i;
	unsigned int count;

	count = 0;
	for (i = 0; i < n; ++i) {
		/* Only directory entries contribute a hardlink to the parent. */
		if (S_ISDIR(entries[i].mode))
			++count;
	}

	return count;
}

/*
 * Copy (and take a reference on) @task's root or cwd path into *path.
 * @root selects root (true) vs. cwd (false). Returns 0 on success or
 * -ENOENT if the task has no fs_struct (it is exiting).
 * Caller must path_put() the result on success.
 */
static int get_fs_path(struct task_struct *task, struct path *path, bool root)
{
	struct fs_struct *fs;
	int result = -ENOENT;

	task_lock(task);
	fs = task->fs;
	if (fs) {
		read_lock(&fs->lock);
		*path = root ?
fs->root : fs->pwd;
		path_get(path);		/* caller owns this reference */
		read_unlock(&fs->lock);
		result = 0;
	}
	task_unlock(task);
	return result;
}

/*
 * Return the number of threads in @tsk's thread group, or 0 if the
 * signal struct is already gone (task is exiting).
 */
static int get_nr_threads(struct task_struct *tsk)
{
	unsigned long flags;
	int count = 0;

	if (lock_task_sighand(tsk, &flags)) {
		count = atomic_read(&tsk->signal->count);
		unlock_task_sighand(tsk, &flags);
	}
	return count;
}

/* proc_get_link callback for /proc/<pid>/cwd. */
static int proc_cwd_link(struct inode *inode, struct path *path)
{
	struct task_struct *task = get_proc_task(inode);
	int result = -ENOENT;

	if (task) {
		result = get_fs_path(task, path, 0);	/* 0 == cwd */
		put_task_struct(task);
	}
	return result;
}

/* proc_get_link callback for /proc/<pid>/root. */
static int proc_root_link(struct inode *inode, struct path *path)
{
	struct task_struct *task = get_proc_task(inode);
	int result = -ENOENT;

	if (task) {
		result = get_fs_path(task, path, 1);	/* 1 == root */
		put_task_struct(task);
	}
	return result;
}

/*
 * Return zero if current may access user memory in @task, -error if not.
 */
static int check_mem_permission(struct task_struct *task)
{
	/*
	 * A task can always look at itself, in case it chooses
	 * to use system calls instead of load instructions.
	 */
	if (task == current)
		return 0;

	/*
	 * If current is actively ptrace'ing, and would also be
	 * permitted to freshly attach with ptrace now, permit it.
	 */
	if (task_is_stopped_or_traced(task)) {
		int match;
		rcu_read_lock();
		match = (tracehook_tracer_task(task) == current);
		rcu_read_unlock();
		if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
			return 0;
	}

	/*
	 * No one else is allowed.
	 */
	return -EPERM;
}

/*
 * Grab a reference to @task's mm for the maps files, but only if current
 * is allowed to inspect it: either it is current's own mm, or current
 * passes the ptrace read-access check. Returns NULL on denial, on a
 * missing mm, or if taking cred_guard_mutex was interrupted by a fatal
 * signal. The mutex serialises against exec crossing a security
 * boundary while we decide.
 */
struct mm_struct *mm_for_maps(struct task_struct *task)
{
	struct mm_struct *mm;

	if (mutex_lock_killable(&task->cred_guard_mutex))
		return NULL;

	mm = get_task_mm(task);
	if (mm && mm != current->mm &&
			!ptrace_may_access(task, PTRACE_MODE_READ)) {
		mmput(mm);
		mm = NULL;
	}
	mutex_unlock(&task->cred_guard_mutex);

	return mm;
}

/*
 * Fill @buffer (one page) with the task's command line, read from the
 * task's own address space between mm->arg_start and mm->arg_end.
 * Returns the number of bytes placed in the buffer, or 0 on failure.
 */
static int proc_pid_cmdline(struct task_struct *task, char * buffer)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh!
No looking before we're done */

	len = mm->arg_end - mm->arg_start;

	/* Never copy more than one page into the caller's buffer. */
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;

	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3): fall back to
	 * scanning into the environment area for the real terminator.
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
		len = strnlen(buffer, res);
		if (len < res) {
			/* An embedded nul ends the string early. */
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > PAGE_SIZE - res)
				len = PAGE_SIZE - res;
			res += access_process_vm(task, mm->env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

/*
 * Copy the task's saved ELF auxiliary vector into @buffer (at most one
 * page). The vector is a sequence of (type, value) pairs terminated by
 * an AT_NULL type; we walk it two words at a time to find its length.
 * Returns the number of bytes copied, or 0 if the task has no mm.
 */
static int proc_pid_auxv(struct task_struct *task, char *buffer)
{
	int res = 0;
	struct mm_struct *mm = get_task_mm(task);
	if (mm) {
		unsigned int nwords = 0;
		do {
			nwords += 2;
		} while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
		res = nwords * sizeof(mm->saved_auxv[0]);
		if (res > PAGE_SIZE)
			res = PAGE_SIZE;
		memcpy(buffer, mm->saved_auxv, res);
		mmput(mm);
	}
	return res;
}


#ifdef CONFIG_KALLSYMS
/*
 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
 * Returns the resolved symbol. If that fails, simply return the address.
*/ static int proc_pid_wchan(struct task_struct *task, char *buffer) { unsigned long wchan; char symname[KSYM_NAME_LEN]; wchan = get_wchan(task); if (lookup_symbol_name(wchan, symname) < 0) if (!ptrace_may_access(task, PTRACE_MODE_READ)) return 0; else return sprintf(buffer, "%lu", wchan); else return sprintf(buffer, "%s", symname); } #endif /* CONFIG_KALLSYMS */ #ifdef CONFIG_STACKTRACE #define MAX_STACK_TRACE_DEPTH 64 static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { struct stack_trace trace; unsigned long *entries; int i; entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL); if (!entries) return -ENOMEM; trace.nr_entries = 0; trace.max_entries = MAX_STACK_TRACE_DEPTH; trace.entries = entries; trace.skip = 0; save_stack_trace_tsk(task, &trace); for (i = 0; i < trace.nr_entries; i++) { seq_printf(m, "[<%p>] %pS\n", (void *)entries[i], (void *)entries[i]); } kfree(entries); return 0; } #endif #ifdef CONFIG_SCHEDSTATS /* * Provides /proc/PID/schedstat */ static int proc_pid_schedstat(struct task_struct *task, char *buffer) { return sprintf(buffer, "%llu %llu %lu\n", (unsigned long long)task->se.sum_exec_runtime, (unsigned long long)task->sched_info.run_delay, task->sched_info.pcount); } #endif #ifdef CONFIG_LATENCYTOP static int lstats_show_proc(struct seq_file *m, void *v) { int i; struct inode *inode = m->private; struct task_struct *task = get_proc_task(inode); if (!task) return -ESRCH; seq_puts(m, "Latency Top version : v0.1\n"); for (i = 0; i < 32; i++) { if (task->latency_record[i].backtrace[0]) { int q; seq_printf(m, "%i %li %li ", task->latency_record[i].count, task->latency_record[i].time, task->latency_record[i].max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { char sym[KSYM_SYMBOL_LEN]; char *c; if (!task->latency_record[i].backtrace[q]) break; if (task->latency_record[i].backtrace[q] == ULONG_MAX) break; sprint_symbol(sym, task->latency_record[i].backtrace[q]); c = 
strchr(sym, '+'); if (c) *c = 0; seq_printf(m, "%s ", sym); } seq_printf(m, "\n"); } } put_task_struct(task); return 0; } static int lstats_open(struct inode *inode, struct file *file) { return single_open(file, lstats_show_proc, inode); } static ssize_t lstats_write(struct file *file, const char __user *buf, size_t count, loff_t *offs) { struct task_struct *task = get_proc_task(file->f_dentry->d_inode); if (!task) return -ESRCH; clear_all_latency_tracing(task); put_task_struct(task); return count; } static const struct file_operations proc_lstats_operations = { .open = lstats_open, .read = seq_read, .write = lstats_write, .llseek = seq_lseek, .release = single_release, }; #endif /* The badness from the OOM killer */ unsigned long badness(struct task_struct *p, unsigned long uptime); static int proc_oom_score(struct task_struct *task, char *buffer) { unsigned long points; struct timespec uptime; do_posix_clock_monotonic_gettime(&uptime); read_lock(&tasklist_lock); points = badness(task->group_leader, uptime.tv_sec); read_unlock(&tasklist_lock); return sprintf(buffer, "%lu\n", points); } struct limit_names { char *name; char *unit; }; static const struct limit_names lnames[RLIM_NLIMITS] = { [RLIMIT_CPU] = {"Max cpu time", "seconds"}, [RLIMIT_FSIZE] = {"Max file size", "bytes"}, [RLIMIT_DATA] = {"Max data size", "bytes"}, [RLIMIT_STACK] = {"Max stack size", "bytes"}, [RLIMIT_CORE] = {"Max core file size", "bytes"}, [RLIMIT_RSS] = {"Max resident set", "bytes"}, [RLIMIT_NPROC] = {"Max processes", "processes"}, [RLIMIT_NOFILE] = {"Max open files", "files"}, [RLIMIT_MEMLOCK] = {"Max locked memory", "bytes"}, [RLIMIT_AS] = {"Max address space", "bytes"}, [RLIMIT_LOCKS] = {"Max file locks", "locks"}, [RLIMIT_SIGPENDING] = {"Max pending signals", "signals"}, [RLIMIT_MSGQUEUE] = {"Max msgqueue size", "bytes"}, [RLIMIT_NICE] = {"Max nice priority", NULL}, [RLIMIT_RTPRIO] = {"Max realtime priority", NULL}, [RLIMIT_RTTIME] = {"Max realtime timeout", "us"}, }; /* Display limits 
for a process */ static int proc_pid_limits(struct task_struct *task, char *buffer) { unsigned int i; int count = 0; unsigned long flags; char *bufptr = buffer; struct rlimit rlim[RLIM_NLIMITS]; if (!lock_task_sighand(task, &flags)) return 0; memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS); unlock_task_sighand(task, &flags); /* * print the file header */ count += sprintf(&bufptr[count], "%-25s %-20s %-20s %-10s\n", "Limit", "Soft Limit", "Hard Limit", "Units"); for (i = 0; i < RLIM_NLIMITS; i++) { if (rlim[i].rlim_cur == RLIM_INFINITY) count += sprintf(&bufptr[count], "%-25s %-20s ", lnames[i].name, "unlimited"); else count += sprintf(&bufptr[count], "%-25s %-20lu ", lnames[i].name, rlim[i].rlim_cur); if (rlim[i].rlim_max == RLIM_INFINITY) count += sprintf(&bufptr[count], "%-20s ", "unlimited"); else count += sprintf(&bufptr[count], "%-20lu ", rlim[i].rlim_max); if (lnames[i].unit) count += sprintf(&bufptr[count], "%-10s\n", lnames[i].unit); else count += sprintf(&bufptr[count], "\n"); } return count; } #ifdef CONFIG_HAVE_ARCH_TRACEHOOK static int proc_pid_syscall(struct task_struct *task, char *buffer) { long nr; unsigned long args[6], sp, pc; if (task_current_syscall(task, &nr, args, 6, &sp, &pc)) return sprintf(buffer, "running\n"); if (nr < 0) return sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc); return sprintf(buffer, "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", nr, args[0], args[1], args[2], args[3], args[4], args[5], sp, pc); } #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */ /************************************************************************/ /* Here the fs part begins */ /************************************************************************/ /* permission checks */ static int proc_fd_access_allowed(struct inode *inode) { struct task_struct *task; int allowed = 0; /* Allow access to a task's file descriptors if it is us or we * may use ptrace attach to the process and find out that * information. 
*/ task = get_proc_task(inode); if (task) { allowed = ptrace_may_access(task, PTRACE_MODE_READ); put_task_struct(task); } return allowed; } static int proc_setattr(struct dentry *dentry, struct iattr *attr) { int error; struct inode *inode = dentry->d_inode; if (attr->ia_valid & ATTR_MODE) return -EPERM; error = inode_change_ok(inode, attr); if (!error) error = inode_setattr(inode, attr); return error; } static const struct inode_operations proc_def_inode_operations = { .setattr = proc_setattr, }; static int mounts_open_common(struct inode *inode, struct file *file, const struct seq_operations *op) { struct task_struct *task = get_proc_task(inode); struct nsproxy *nsp; struct mnt_namespace *ns = NULL; struct path root; struct proc_mounts *p; int ret = -EINVAL; if (task) { rcu_read_lock(); nsp = task_nsproxy(task); if (nsp) { ns = nsp->mnt_ns; if (ns) get_mnt_ns(ns); } rcu_read_unlock(); if (ns && get_fs_path(task, &root, 1) == 0) ret = 0; put_task_struct(task); } if (!ns) goto err; if (ret) goto err_put_ns; ret = -ENOMEM; p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); if (!p) goto err_put_path; file->private_data = &p->m; ret = seq_open(file, op); if (ret) goto err_free; p->m.private = p; p->ns = ns; p->root = root; p->event = ns->event; return 0; err_free: kfree(p); err_put_path: path_put(&root); err_put_ns: put_mnt_ns(ns); err: return ret; } static int mounts_release(struct inode *inode, struct file *file) { struct proc_mounts *p = file->private_data; path_put(&p->root); put_mnt_ns(p->ns); return seq_release(inode, file); } static unsigned mounts_poll(struct file *file, poll_table *wait) { struct proc_mounts *p = file->private_data; struct mnt_namespace *ns = p->ns; unsigned res = POLLIN | POLLRDNORM; poll_wait(file, &ns->poll, wait); spin_lock(&vfsmount_lock); if (p->event != ns->event) { p->event = ns->event; res |= POLLERR | POLLPRI; } spin_unlock(&vfsmount_lock); return res; } static int mounts_open(struct inode *inode, struct file *file) { return 
mounts_open_common(inode, file, &mounts_op); } static const struct file_operations proc_mounts_operations = { .open = mounts_open, .read = seq_read, .llseek = seq_lseek, .release = mounts_release, .poll = mounts_poll, }; static int mountinfo_open(struct inode *inode, struct file *file) { return mounts_open_common(inode, file, &mountinfo_op); } static const struct file_operations proc_mountinfo_operations = { .open = mountinfo_open, .read = seq_read, .llseek = seq_lseek, .release = mounts_release, .poll = mounts_poll, }; static int mountstats_open(struct inode *inode, struct file *file) { return mounts_open_common(inode, file, &mountstats_op); } static const struct file_operations proc_mountstats_operations = { .open = mountstats_open, .read = seq_read, .llseek = seq_lseek, .release = mounts_release, }; #define PROC_BLOCK_SIZE (3*1024) /* 4K page size but our output routines use some slack for overruns */ static ssize_t proc_info_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file->f_path.dentry->d_inode; unsigned long page; ssize_t length; struct task_struct *task = get_proc_task(inode); length = -ESRCH; if (!task) goto out_no_task; if (count > PROC_BLOCK_SIZE) count = PROC_BLOCK_SIZE; length = -ENOMEM; if (!(page = __get_free_page(GFP_TEMPORARY))) goto out; length = PROC_I(inode)->op.proc_read(task, (char*)page); if (length >= 0) length = simple_read_from_buffer(buf, count, ppos, (char *)page, length); free_page(page); out: put_task_struct(task); out_no_task: return length; } static const struct file_operations proc_info_file_operations = { .read = proc_info_read, }; static int proc_single_show(struct seq_file *m, void *v) { struct inode *inode = m->private; struct pid_namespace *ns; struct pid *pid; struct task_struct *task; int ret; ns = inode->i_sb->s_fs_info; pid = proc_pid(inode); task = get_pid_task(pid, PIDTYPE_PID); if (!task) return -ESRCH; ret = PROC_I(inode)->op.proc_show(m, ns, pid, task); 
put_task_struct(task);
	return ret;
}

/*
 * Open a one-record seq_file whose show callback is proc_single_show;
 * stash the proc inode in m->private so the callback can find the task.
 */
static int proc_single_open(struct inode *inode, struct file *filp)
{
	int ret;
	ret = single_open(filp, proc_single_show, NULL);
	if (!ret) {
		struct seq_file *m = filp->private_data;
		m->private = inode;
	}
	return ret;
}

static const struct file_operations proc_single_file_operations = {
	.open		= proc_single_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Open /proc/<pid>/mem: remember the opener's self_exec_id so that
 * mem_read() can later detect whether an exec happened in between
 * (the stored value is compared against current->self_exec_id there).
 */
static int mem_open(struct inode* inode, struct file* file)
{
	file->private_data = (void*)((long)current->self_exec_id);
	return 0;
}

/*
 * Read @count bytes of the target task's address space at *ppos into the
 * user buffer. Access is gated by check_mem_permission() (re-checked on
 * every chunk) and by the self_exec_id snapshot taken at open time, which
 * rejects descriptors carried across an exec. Returns bytes copied or a
 * negative errno.
 */
static ssize_t mem_read(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	char *page;
	unsigned long src = *ppos;
	int ret = -ESRCH;
	struct mm_struct *mm;

	if (!task)
		goto out_no_task;

	if (check_mem_permission(task))
		goto out;

	ret = -ENOMEM;
	page = (char *)__get_free_page(GFP_TEMPORARY);	/* bounce buffer */
	if (!page)
		goto out;

	ret = 0;

	mm = get_task_mm(task);
	if (!mm)
		goto out_free;

	ret = -EIO;

	/* Reject if current's self_exec_id changed since open (exec happened). */
	if (file->private_data != (void*)((long)current->self_exec_id))
		goto out_put;

	ret = 0;

	while (count > 0) {
		int this_len, retval;

		/* Copy at most one page per iteration through the bounce buffer. */
		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
		retval = access_process_vm(task, src, page, this_len, 0);
		/* Permission is re-validated after each chunk. */
		if (!retval || check_mem_permission(task)) {
			if (!ret)
				ret = -EIO;
			break;
		}

		if (copy_to_user(buf, page, retval)) {
			ret = -EFAULT;
			break;
		}

		ret += retval;
		src += retval;
		buf += retval;
		count -= retval;
	}
	*ppos = src;

out_put:
	mmput(mm);
out_free:
	free_page((unsigned long) page);
out:
	put_task_struct(task);
out_no_task:
	return ret;
}

/*
 * Writing to another task's memory is disabled: mem_write is defined to
 * NULL (used directly in proc_mem_operations below), so the #ifndef block
 * that follows is compiled out.
 */
#define mem_write NULL

#ifndef mem_write
/* This is a security hazard */
static ssize_t mem_write(struct file * file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	int copied;
	char *page;
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	unsigned long dst = *ppos;

	copied = -ESRCH;
	if (!task)
		goto out_no_task;

	if (check_mem_permission(task))
		goto out;

	copied = -ENOMEM;
	page = (char *)__get_free_page(GFP_TEMPORARY);	/* bounce buffer */
	if (!page)
		goto out;

	copied = 0;
	while (count > 0) {
		int this_len, retval;

		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
		if (copy_from_user(page, buf, this_len)) {
			copied = -EFAULT;
			break;
		}
		retval = access_process_vm(task, dst, page, this_len, 1);
		if (!retval) {
			if (!copied)
				copied = -EIO;
			break;
		}
		copied += retval;
		buf += retval;
		dst += retval;
		count -= retval;
	}
	*ppos = dst;

	free_page((unsigned long) page);
out:
	put_task_struct(task);
out_no_task:
	return copied;
}
#endif

/*
 * llseek for /proc/<pid>/mem: supports SEEK_SET (0) and SEEK_CUR (1)
 * only; anything else is -EINVAL. The offset is the address to access.
 */
loff_t mem_lseek(struct file *file, loff_t offset, int orig)
{
	switch (orig) {
	case 0:
		file->f_pos = offset;
		break;
	case 1:
		file->f_pos += offset;
		break;
	default:
		return -EINVAL;
	}
	/* Large offsets are valid addresses, not errors. */
	force_successful_syscall_return();
	return file->f_pos;
}

static const struct file_operations proc_mem_operations = {
	.llseek		= mem_lseek,
	.read		= mem_read,
	.write		= mem_write,	/* NULL: writes disabled */
	.open		= mem_open,
};

/*
 * Read the target task's environment block (env_start..env_end) at
 * offset *ppos into the user buffer, gated by ptrace_may_access().
 * Returns bytes copied or a negative errno.
 */
static ssize_t environ_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
	char *page;
	unsigned long src = *ppos;
	int ret = -ESRCH;
	struct mm_struct *mm;

	if (!task)
		goto out_no_task;

	if
(!ptrace_may_access(task, PTRACE_MODE_READ)) goto out; ret = -ENOMEM; page = (char *)__get_free_page(GFP_TEMPORARY); if (!page) goto out; ret = 0; mm = get_task_mm(task); if (!mm) goto out_free; while (count > 0) { int this_len, retval, max_len; this_len = mm->env_end - (mm->env_start + src); if (this_len <= 0) break; max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; this_len = (this_len > max_len) ? max_len : this_len; retval = access_process_vm(task, (mm->env_start + src), page, this_len, 0); if (retval <= 0) { ret = retval; break; } if (copy_to_user(buf, page, retval)) { ret = -EFAULT; break; } ret += retval; src += retval; buf += retval; count -= retval; } *ppos = src; mmput(mm); out_free: free_page((unsigned long) page); out: put_task_struct(task); out_no_task: return ret; } static const struct file_operations proc_environ_operations = { .read = environ_read, }; static ssize_t oom_adjust_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); char buffer[PROC_NUMBUF]; size_t len; int oom_adjust = OOM_DISABLE; unsigned long flags; if (!task) return -ESRCH; if (lock_task_sighand(task, &flags)) { oom_adjust = task->signal->oom_adj; unlock_task_sighand(task, &flags); } put_task_struct(task); len = snprintf(buffer, sizeof(buffer), "%i\n", oom_adjust); return simple_read_from_buffer(buf, count, ppos, buffer, len); } static ssize_t oom_adjust_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task; char buffer[PROC_NUMBUF]; long oom_adjust; unsigned long flags; int err; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; err = strict_strtol(strstrip(buffer), 0, &oom_adjust); if (err) return -EINVAL; if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && oom_adjust != OOM_DISABLE) return -EINVAL; task = 
get_proc_task(file->f_path.dentry->d_inode); if (!task) return -ESRCH; if (!lock_task_sighand(task, &flags)) { put_task_struct(task); return -ESRCH; } if (oom_adjust < task->signal->oom_adj && !capable(CAP_SYS_RESOURCE)) { unlock_task_sighand(task, &flags); put_task_struct(task); return -EACCES; } task->signal->oom_adj = oom_adjust; unlock_task_sighand(task, &flags); put_task_struct(task); return count; } static const struct file_operations proc_oom_adjust_operations = { .read = oom_adjust_read, .write = oom_adjust_write, }; #ifdef CONFIG_AUDITSYSCALL #define TMPBUFLEN 21 static ssize_t proc_loginuid_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file->f_path.dentry->d_inode; struct task_struct *task = get_proc_task(inode); ssize_t length; char tmpbuf[TMPBUFLEN]; if (!task) return -ESRCH; length = scnprintf(tmpbuf, TMPBUFLEN, "%u", audit_get_loginuid(task)); put_task_struct(task); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static ssize_t proc_loginuid_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file->f_path.dentry->d_inode; char *page, *tmp; ssize_t length; uid_t loginuid; if (!capable(CAP_AUDIT_CONTROL)) return -EPERM; if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) return -EPERM; if (count >= PAGE_SIZE) count = PAGE_SIZE - 1; if (*ppos != 0) { /* No partial writes. 
*/ return -EINVAL; } page = (char*)__get_free_page(GFP_TEMPORARY); if (!page) return -ENOMEM; length = -EFAULT; if (copy_from_user(page, buf, count)) goto out_free_page; page[count] = '\0'; loginuid = simple_strtoul(page, &tmp, 10); if (tmp == page) { length = -EINVAL; goto out_free_page; } length = audit_set_loginuid(current, loginuid); if (likely(length == 0)) length = count; out_free_page: free_page((unsigned long) page); return length; } static const struct file_operations proc_loginuid_operations = { .read = proc_loginuid_read, .write = proc_loginuid_write, }; static ssize_t proc_sessionid_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file->f_path.dentry->d_inode; struct task_struct *task = get_proc_task(inode); ssize_t length; char tmpbuf[TMPBUFLEN]; if (!task) return -ESRCH; length = scnprintf(tmpbuf, TMPBUFLEN, "%u", audit_get_sessionid(task)); put_task_struct(task); return simple_read_from_buffer(buf, count, ppos, tmpbuf, length); } static const struct file_operations proc_sessionid_operations = { .read = proc_sessionid_read, }; #endif #ifdef CONFIG_FAULT_INJECTION static ssize_t proc_fault_inject_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { struct task_struct *task = get_proc_task(file->f_dentry->d_inode); char buffer[PROC_NUMBUF]; size_t len; int make_it_fail; if (!task) return -ESRCH; make_it_fail = task->make_it_fail; put_task_struct(task); len = snprintf(buffer, sizeof(buffer), "%i\n", make_it_fail); return simple_read_from_buffer(buf, count, ppos, buffer, len); } static ssize_t proc_fault_inject_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { struct task_struct *task; char buffer[PROC_NUMBUF], *end; int make_it_fail; if (!capable(CAP_SYS_RESOURCE)) return -EPERM; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) return -EFAULT; make_it_fail = 
simple_strtol(strstrip(buffer), &end, 0);
	if (*end)
		return -EINVAL;
	task = get_proc_task(file->f_dentry->d_inode);
	if (!task)
		return -ESRCH;
	task->make_it_fail = make_it_fail;
	put_task_struct(task);
	return count;
}

static const struct file_operations proc_fault_inject_operations = {
	.read	= proc_fault_inject_read,
	.write	= proc_fault_inject_write,
};
#endif

#ifdef CONFIG_SCHED_DEBUG
/*
 * Print out various scheduling related per-task fields:
 */
static int sched_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *p;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	proc_sched_show_task(p, m);
	put_task_struct(p);
	return 0;
}

/*
 * Writing anything to /proc/<pid>/sched resets the task's scheduler
 * statistics via proc_sched_set_task(); the data itself is ignored.
 */
static ssize_t
sched_write(struct file *file, const char __user *buf,
	    size_t count, loff_t *offset)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct task_struct *p;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	proc_sched_set_task(p);
	put_task_struct(p);
	return count;
}

/* Stash the inode in seq_file->private so sched_show() can find the task. */
static int sched_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = single_open(filp, sched_show, NULL);
	if (!ret) {
		struct seq_file *m = filp->private_data;

		m->private = inode;
	}
	return ret;
}

static const struct file_operations proc_pid_sched_operations = {
	.open		= sched_open,
	.read		= seq_read,
	.write		= sched_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

/*
 * Write handler for /proc/<pid>/comm.  Only a writer in the same thread
 * group as the target task may rename it; anyone else gets -EINVAL.
 * The new name is silently truncated to TASK_COMM_LEN - 1 bytes.
 */
static ssize_t comm_write(struct file *file, const char __user *buf,
				size_t count, loff_t *offset)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct task_struct *p;
	char buffer[TASK_COMM_LEN];

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	if (same_thread_group(current, p))
		set_task_comm(p, buffer);
	else
		count = -EINVAL;
	put_task_struct(p);
	return count;
}

/* seq_file show routine for /proc/<pid>/comm: prints the command name. */
static int comm_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;
	struct task_struct *p;

	p = get_proc_task(inode);
	if (!p)
		return -ESRCH;
	/* task_lock() serialises against concurrent set_task_comm(). */
	task_lock(p);
	seq_printf(m, "%s\n", p->comm);
	task_unlock(p);
	put_task_struct(p);
	return 0;
}

static int comm_open(struct inode *inode, struct file *filp)
{
	int ret;

	ret = single_open(filp, comm_show, NULL);
	if (!ret) {
		struct seq_file *m = filp->private_data;

		m->private = inode;
	}
	return ret;
}

static const struct file_operations proc_pid_set_comm_operations = {
	.open		= comm_open,
	.read		= seq_read,
	.write		= comm_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * We added or removed a vma mapping the executable. The vmas are only mapped
 * during exec and are not mapped with the mmap system call.
 * Callers must hold down_write() on the mm's mmap_sem for these
 */
void added_exe_file_vma(struct mm_struct *mm)
{
	mm->num_exe_file_vmas++;
}

void removed_exe_file_vma(struct mm_struct *mm)
{
	mm->num_exe_file_vmas--;
	/* Drop the exe_file reference once the last VM_EXECUTABLE vma is gone. */
	if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
		fput(mm->exe_file);
		mm->exe_file = NULL;
	}
}

void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
	if (new_exe_file)
		get_file(new_exe_file);
	if (mm->exe_file)
		fput(mm->exe_file);
	mm->exe_file = new_exe_file;
	mm->num_exe_file_vmas = 0;
}

struct file *get_mm_exe_file(struct mm_struct *mm)
{
	struct file *exe_file;

	/* We need mmap_sem to protect against races with removal of
	 * VM_EXECUTABLE vmas */
	down_read(&mm->mmap_sem);
	exe_file = mm->exe_file;
	if (exe_file)
		get_file(exe_file);
	up_read(&mm->mmap_sem);
	return exe_file;
}

void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	/* It's safe to write the exe_file pointer without exe_file_lock because
	 * this is called during fork when the task is not yet in /proc */
	newmm->exe_file = get_mm_exe_file(oldmm);
}

/* Resolve /proc/<pid>/exe to the path of the task's executable image. */
static int proc_exe_link(struct inode *inode, struct path *exe_path)
{
	struct task_struct *task;
	struct mm_struct *mm;
	struct file *exe_file;

	task = get_proc_task(inode);
	if (!task)
		return -ENOENT;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		return -ENOENT;
	exe_file = get_mm_exe_file(mm);
	mmput(mm);
	if (exe_file) {
		*exe_path = exe_file->f_path;
		path_get(&exe_file->f_path);
		fput(exe_file);
		return 0;
	} else
		return -ENOENT;
}

static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	int error = -EACCES;

	/* We don't need a base pointer in the /proc filesystem */
	path_put(&nd->path);

	/* Are we allowed to snoop on the tasks file descriptors? */
	if (!proc_fd_access_allowed(inode))
		goto out;

	error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
out:
	return ERR_PTR(error);
}

/* Copy the textual form of *path to the user buffer, truncated to buflen. */
static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
{
	char *tmp = (char*)__get_free_page(GFP_TEMPORARY);
	char *pathname;
	int len;

	if (!tmp)
		return -ENOMEM;

	pathname = d_path(path, tmp, PAGE_SIZE);
	len = PTR_ERR(pathname);
	if (IS_ERR(pathname))
		goto out;
	/* d_path() builds the string at the end of the page. */
	len = tmp + PAGE_SIZE - 1 - pathname;

	if (len > buflen)
		len = buflen;
	if (copy_to_user(buffer, pathname, len))
		len = -EFAULT;
out:
	free_page((unsigned long)tmp);
	return len;
}

static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int buflen)
{
	int error = -EACCES;
	struct inode *inode = dentry->d_inode;
	struct path path;

	/* Are we allowed to snoop on the tasks file descriptors? */
	if (!proc_fd_access_allowed(inode))
		goto out;

	error = PROC_I(inode)->op.proc_get_link(inode, &path);
	if (error)
		goto out;

	error = do_proc_readlink(&path, buffer, buflen);
	path_put(&path);
out:
	return error;
}

static const struct inode_operations proc_pid_link_inode_operations = {
	.readlink	= proc_pid_readlink,
	.follow_link	= proc_pid_follow_link,
	.setattr	= proc_setattr,
};


/* building an inode */

/* Returns 1 iff the task's mm is fully dumpable (get_dumpable() == 1). */
static int task_dumpable(struct task_struct *task)
{
	int dumpable = 0;
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm)
		dumpable = get_dumpable(mm);
	task_unlock(task);
	if(dumpable == 1)
		return 1;
	return 0;
}

/*
 * Allocate a /proc inode for @task, pin the task's struct pid in the
 * proc_inode, and set the owner to the task's euid/egid when the task
 * is dumpable (non-dumpable tasks keep root ownership to avoid leaking
 * information after a setuid exec).
 */
static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task)
{
	struct inode * inode;
	struct proc_inode *ei;
	const struct cred *cred;

	/* We need a new inode */
	inode = new_inode(sb);
	if (!inode)
		goto out;

	/* Common stuff */
	ei = PROC_I(inode);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	inode->i_op = &proc_def_inode_operations;

	/*
	 * grab the reference to task.
	 */
	ei->pid = get_task_pid(task, PIDTYPE_PID);
	if (!ei->pid)
		goto out_unlock;

	if (task_dumpable(task)) {
		rcu_read_lock();
		cred = __task_cred(task);
		inode->i_uid = cred->euid;
		inode->i_gid = cred->egid;
		rcu_read_unlock();
	}
	security_task_to_inode(task, inode);

out:
	return inode;

out_unlock:
	iput(inode);
	return NULL;
}

static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *task;
	const struct cred *cred;

	generic_fillattr(inode, stat);

	rcu_read_lock();
	stat->uid = 0;
	stat->gid = 0;
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		/* Expose euid/egid only for dumpable tasks (or the
		 * world-readable top-level pid directory itself). */
		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
		    task_dumpable(task)) {
			cred = __task_cred(task);
			stat->uid = cred->euid;
			stat->gid = cred->egid;
		}
	}
	rcu_read_unlock();
	return 0;
}

/* dentry stuff */

/*
 * Exceptional case: normally we are not allowed to unhash a busy
 * directory.
   In this case, however, we can do it - no aliasing problems
 * due to the way we treat inodes.
 *
 * Rewrite the inode's ownerships here because the owning task may have
 * performed a setuid(), etc.
 *
 * Before the /proc/pid/status file was created the only way to read
 * the effective uid of a /process was to stat /proc/pid.  Reading
 * /proc/pid/status is slow enough that procps and other packages
 * kept stating /proc/pid.  To keep the rules in /proc simple I have
 * made this apply to all per process world readable and executable
 * directories.
 */
static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *task = get_proc_task(inode);
	const struct cred *cred;

	if (task) {
		if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
		    task_dumpable(task)) {
			rcu_read_lock();
			cred = __task_cred(task);
			inode->i_uid = cred->euid;
			inode->i_gid = cred->egid;
			rcu_read_unlock();
		} else {
			/* Non-dumpable task: revert to root ownership. */
			inode->i_uid = 0;
			inode->i_gid = 0;
		}
		inode->i_mode &= ~(S_ISUID | S_ISGID);
		security_task_to_inode(task, inode);
		put_task_struct(task);
		return 1;
	}
	/* Task is gone: drop the dentry so the name gets looked up again. */
	d_drop(dentry);
	return 0;
}

static int pid_delete_dentry(struct dentry * dentry)
{
	/* Is the task we represent dead?
	 * If so, then don't put the dentry on the lru list,
	 * kill it immediately.
	 */
	return !proc_pid(dentry->d_inode)->tasks[PIDTYPE_PID].first;
}

static const struct dentry_operations pid_dentry_operations =
{
	.d_revalidate	= pid_revalidate,
	.d_delete	= pid_delete_dentry,
};

/* Lookups */

/* Factory callback used by the fill-cache/lookup helpers below. */
typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
				struct task_struct *, const void *);

/*
 * Fill a directory entry.
 *
 * If possible create the dcache entry and derive our inode number and
 * file type from dcache entry.
 *
 * Since all of the proc inode numbers are dynamically generated, the inode
 * numbers do not exist until the inode is cached.
   This means creating the
 * dcache entry in readdir is necessary to keep the inode numbers
 * reported by readdir in sync with the inode numbers reported
 * by stat.
 */
static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	char *name, int len,
	instantiate_t instantiate, struct task_struct *task, const void *ptr)
{
	struct dentry *child, *dir = filp->f_path.dentry;
	struct inode *inode;
	struct qstr qname;
	ino_t ino = 0;
	unsigned type = DT_UNKNOWN;

	qname.name = name;
	qname.len  = len;
	qname.hash = full_name_hash(name, len);

	child = d_lookup(dir, &qname);
	if (!child) {
		struct dentry *new;
		new = d_alloc(dir, &qname);
		if (new) {
			child = instantiate(dir->d_inode, new, task, ptr);
			if (child)
				dput(new);
			else
				child = new;
		}
	}
	if (!child || IS_ERR(child) || !child->d_inode)
		goto end_instantiate;
	inode = child->d_inode;
	if (inode) {
		ino = inode->i_ino;
		type = inode->i_mode >> 12;
	}
	dput(child);
end_instantiate:
	if (!ino)
		ino = find_inode_number(dir, &qname);
	if (!ino)
		ino = 1;
	return filldir(dirent, name, len, filp->f_pos, ino, type);
}

/*
 * Parse a dentry name as a decimal pid/fd number.  Returns ~0U on any
 * non-numeric name, leading zero, or overflow.
 */
static unsigned name_to_int(struct dentry *dentry)
{
	const char *name = dentry->d_name.name;
	int len = dentry->d_name.len;
	unsigned n = 0;

	if (len > 1 && *name == '0')
		goto out;
	while (len-- > 0) {
		unsigned c = *name++ - '0';
		if (c > 9)
			goto out;
		if (n >= (~0U-9)/10)
			goto out;
		n *= 10;
		n += c;
	}
	return n;
out:
	return ~0U;
}

#define PROC_FDINFO_MAX 64

/*
 * Common lookup for an fd of the inode's task: optionally returns the
 * file's path (with a reference) and/or its pos/flags text for fdinfo.
 */
static int proc_fd_info(struct inode *inode, struct path *path, char *info)
{
	struct task_struct *task = get_proc_task(inode);
	struct files_struct *files = NULL;
	struct file *file;
	int fd = proc_fd(inode);

	if (task) {
		files = get_files_struct(task);
		put_task_struct(task);
	}
	if (files) {
		/*
		 * We are not taking a ref to the file structure, so we must
		 * hold ->file_lock.
		 */
		spin_lock(&files->file_lock);
		file = fcheck_files(files, fd);
		if (file) {
			if (path) {
				*path = file->f_path;
				path_get(&file->f_path);
			}
			if (info)
				snprintf(info, PROC_FDINFO_MAX,
					 "pos:\t%lli\n"
					 "flags:\t0%o\n",
					 (long long) file->f_pos,
					 file->f_flags);
			spin_unlock(&files->file_lock);
			put_files_struct(files);
			return 0;
		}
		spin_unlock(&files->file_lock);
		put_files_struct(files);
	}
	return -ENOENT;
}

static int proc_fd_link(struct inode *inode, struct path *path)
{
	return proc_fd_info(inode, path, NULL);
}

/*
 * Like pid_revalidate() but additionally requires that the fd still
 * exists in the task's file table.
 */
static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *task = get_proc_task(inode);
	int fd = proc_fd(inode);
	struct files_struct *files;
	const struct cred *cred;

	if (task) {
		files = get_files_struct(task);
		if (files) {
			rcu_read_lock();
			if (fcheck_files(files, fd)) {
				rcu_read_unlock();
				put_files_struct(files);
				if (task_dumpable(task)) {
					rcu_read_lock();
					cred = __task_cred(task);
					inode->i_uid = cred->euid;
					inode->i_gid = cred->egid;
					rcu_read_unlock();
				} else {
					inode->i_uid = 0;
					inode->i_gid = 0;
				}
				inode->i_mode &= ~(S_ISUID | S_ISGID);
				security_task_to_inode(task, inode);
				put_task_struct(task);
				return 1;
			}
			rcu_read_unlock();
			put_files_struct(files);
		}
		put_task_struct(task);
	}
	d_drop(dentry);
	return 0;
}

static const struct dentry_operations tid_fd_dentry_operations =
{
	.d_revalidate	= tid_fd_revalidate,
	.d_delete	= pid_delete_dentry,
};

/* Build the /proc/<pid>/fd/<fd> symlink inode for one file descriptor. */
static struct dentry *proc_fd_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	unsigned fd = *(const unsigned *)ptr;
	struct file *file;
	struct files_struct *files;
	struct inode *inode;
	struct proc_inode *ei;
	struct dentry *error = ERR_PTR(-ENOENT);

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;
	ei = PROC_I(inode);
	ei->fd = fd;
	files = get_files_struct(task);
	if (!files)
		goto out_iput;
	inode->i_mode = S_IFLNK;

	/*
	 * We are not taking a ref to the file structure, so we must
	 * hold ->file_lock.
	 */
	spin_lock(&files->file_lock);
	file = fcheck_files(files, fd);
	if (!file)
		goto out_unlock;
	/* Mirror the fd's access mode in the symlink's permission bits. */
	if (file->f_mode & FMODE_READ)
		inode->i_mode |= S_IRUSR | S_IXUSR;
	if (file->f_mode & FMODE_WRITE)
		inode->i_mode |= S_IWUSR | S_IXUSR;
	spin_unlock(&files->file_lock);
	put_files_struct(files);

	inode->i_op = &proc_pid_link_inode_operations;
	inode->i_size = 64;
	ei->op.proc_get_link = proc_fd_link;
	dentry->d_op = &tid_fd_dentry_operations;
	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (tid_fd_revalidate(dentry, NULL))
		error = NULL;

out:
	return error;
out_unlock:
	spin_unlock(&files->file_lock);
	put_files_struct(files);
out_iput:
	iput(inode);
	goto out;
}

static struct dentry *proc_lookupfd_common(struct inode *dir,
					   struct dentry *dentry,
					   instantiate_t instantiate)
{
	struct task_struct *task = get_proc_task(dir);
	unsigned fd = name_to_int(dentry);
	struct dentry *result = ERR_PTR(-ENOENT);

	if (!task)
		goto out_no_task;
	if (fd == ~0U)
		goto out;

	result = instantiate(dir, dentry, task, &fd);
out:
	put_task_struct(task);
out_no_task:
	return result;
}

/*
 * Shared readdir for /proc/<pid>/fd and /proc/<pid>/fdinfo; @instantiate
 * decides which kind of entry gets created for each live fd.
 */
static int proc_readfd_common(struct file * filp, void * dirent,
			      filldir_t filldir, instantiate_t instantiate)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct task_struct *p = get_proc_task(inode);
	unsigned int fd, ino;
	int retval;
	struct files_struct * files;

	retval = -ENOENT;
	if (!p)
		goto out_no_task;
	retval = 0;

	fd = filp->f_pos;
	switch (fd) {
		case 0:
			if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0)
				goto out;
			filp->f_pos++;
			/* fall through */
		case 1:
			ino = parent_ino(dentry);
			if (filldir(dirent, "..", 2, 1, ino, DT_DIR) < 0)
				goto out;
			filp->f_pos++;
			/* fall through */
		default:
			files = get_files_struct(p);
			if (!files)
				goto out;
			rcu_read_lock();
			for (fd = filp->f_pos-2;
			     fd < files_fdtable(files)->max_fds;
			     fd++, filp->f_pos++) {
				char name[PROC_NUMBUF];
				int len;

				if (!fcheck_files(files, fd))
					continue;
				/* proc_fill_cache() may sleep: drop RCU around it. */
				rcu_read_unlock();

				len = snprintf(name, sizeof(name), "%d", fd);
				if (proc_fill_cache(filp, dirent, filldir,
						    name, len, instantiate,
						    p, &fd) < 0) {
					rcu_read_lock();
					break;
				}
				rcu_read_lock();
			}
			rcu_read_unlock();
			put_files_struct(files);
	}
out:
	put_task_struct(p);
out_no_task:
	return retval;
}

static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
				    struct nameidata *nd)
{
	return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
}

static int proc_readfd(struct file *filp, void *dirent, filldir_t filldir)
{
	return proc_readfd_common(filp, dirent, filldir, proc_fd_instantiate);
}

static ssize_t proc_fdinfo_read(struct file *file, char __user *buf,
				      size_t len, loff_t *ppos)
{
	char tmp[PROC_FDINFO_MAX];
	int err = proc_fd_info(file->f_path.dentry->d_inode, NULL, tmp);
	if (!err)
		err = simple_read_from_buffer(buf, len, ppos, tmp, strlen(tmp));
	return err;
}

static const struct file_operations proc_fdinfo_file_operations = {
	.open		= nonseekable_open,
	.read		= proc_fdinfo_read,
};

static const struct file_operations proc_fd_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_readfd,
};

/*
 * /proc/pid/fd needs a special permission handler so that a process can still
 * access /proc/self/fd after it has executed a setuid().
 */
static int proc_fd_permission(struct inode *inode, int mask)
{
	int rv;

	rv = generic_permission(inode, mask, NULL);
	if (rv == 0)
		return 0;
	/* A task may always look at its own fd directory. */
	if (task_pid(current) == proc_pid(inode))
		rv = 0;
	return rv;
}

/*
 * proc directories can do almost nothing..
 */
static const struct inode_operations proc_fd_inode_operations = {
	.lookup		= proc_lookupfd,
	.permission	= proc_fd_permission,
	.setattr	= proc_setattr,
};

/* Build the /proc/<pid>/fdinfo/<fd> regular-file inode for one fd. */
static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	unsigned fd = *(unsigned *)ptr;
	struct inode *inode;
	struct proc_inode *ei;
	struct dentry *error = ERR_PTR(-ENOENT);

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;
	ei = PROC_I(inode);
	ei->fd = fd;
	inode->i_mode = S_IFREG | S_IRUSR;
	inode->i_fop = &proc_fdinfo_file_operations;
	dentry->d_op = &tid_fd_dentry_operations;
	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (tid_fd_revalidate(dentry, NULL))
		error = NULL;

out:
	return error;
}

static struct dentry *proc_lookupfdinfo(struct inode *dir,
					struct dentry *dentry,
					struct nameidata *nd)
{
	return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
}

static int proc_readfdinfo(struct file *filp, void *dirent, filldir_t filldir)
{
	return proc_readfd_common(filp, dirent, filldir,
				  proc_fdinfo_instantiate);
}

static const struct file_operations proc_fdinfo_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_readfdinfo,
};

/*
 * proc directories can do almost nothing..
*/ static const struct inode_operations proc_fdinfo_inode_operations = { .lookup = proc_lookupfdinfo, .setattr = proc_setattr, }; static struct dentry *proc_pident_instantiate(struct inode *dir, struct dentry *dentry, struct task_struct *task, const void *ptr) { const struct pid_entry *p = ptr; struct inode *inode; struct proc_inode *ei; struct dentry *error = ERR_PTR(-ENOENT); inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) goto out; ei = PROC_I(inode); inode->i_mode = p->mode; if (S_ISDIR(inode->i_mode)) inode->i_nlink = 2; /* Use getattr to fix if necessary */ if (p->iop) inode->i_op = p->iop; if (p->fop) inode->i_fop = p->fop; ei->op = p->op; dentry->d_op = &pid_dentry_operations; d_add(dentry, inode); /* Close the race of the process dying before we return the dentry */ if (pid_revalidate(dentry, NULL)) error = NULL; out: return error; } static struct dentry *proc_pident_lookup(struct inode *dir, struct dentry *dentry, const struct pid_entry *ents, unsigned int nents) { struct dentry *error; struct task_struct *task = get_proc_task(dir); const struct pid_entry *p, *last; error = ERR_PTR(-ENOENT); if (!task) goto out_no_task; /* * Yes, it does not scale. And it should not. Don't add * new entries into /proc/<tgid>/ without very good reasons. 
*/ last = &ents[nents - 1]; for (p = ents; p <= last; p++) { if (p->len != dentry->d_name.len) continue; if (!memcmp(dentry->d_name.name, p->name, p->len)) break; } if (p > last) goto out; error = proc_pident_instantiate(dir, dentry, task, p); out: put_task_struct(task); out_no_task: return error; } static int proc_pident_fill_cache(struct file *filp, void *dirent, filldir_t filldir, struct task_struct *task, const struct pid_entry *p) { return proc_fill_cache(filp, dirent, filldir, p->name, p->len, proc_pident_instantiate, task, p); } static int proc_pident_readdir(struct file *filp, void *dirent, filldir_t filldir, const struct pid_entry *ents, unsigned int nents) { int i; struct dentry *dentry = filp->f_path.dentry; struct inode *inode = dentry->d_inode; struct task_struct *task = get_proc_task(inode); const struct pid_entry *p, *last; ino_t ino; int ret; ret = -ENOENT; if (!task) goto out_no_task; ret = 0; i = filp->f_pos; switch (i) { case 0: ino = inode->i_ino; if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) goto out; i++; filp->f_pos++; /* fall through */ case 1: ino = parent_ino(dentry); if (filldir(dirent, "..", 2, i, ino, DT_DIR) < 0) goto out; i++; filp->f_pos++; /* fall through */ default: i -= 2; if (i >= nents) { ret = 1; goto out; } p = ents + i; last = &ents[nents - 1]; while (p <= last) { if (proc_pident_fill_cache(filp, dirent, filldir, task, p) < 0) goto out; filp->f_pos++; p++; } } ret = 1; out: put_task_struct(task); out_no_task: return ret; } #ifdef CONFIG_SECURITY static ssize_t proc_pid_attr_read(struct file * file, char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file->f_path.dentry->d_inode; char *p = NULL; ssize_t length; struct task_struct *task = get_proc_task(inode); if (!task) return -ESRCH; length = security_getprocattr(task, (char*)file->f_path.dentry->d_name.name, &p); put_task_struct(task); if (length > 0) length = simple_read_from_buffer(buf, count, ppos, p, length); kfree(p); return length; } static 
ssize_t proc_pid_attr_write(struct file * file, const char __user * buf, size_t count, loff_t *ppos) { struct inode * inode = file->f_path.dentry->d_inode; char *page; ssize_t length; struct task_struct *task = get_proc_task(inode); length = -ESRCH; if (!task) goto out_no_task; if (count > PAGE_SIZE) count = PAGE_SIZE; /* No partial writes. */ length = -EINVAL; if (*ppos != 0) goto out; length = -ENOMEM; page = (char*)__get_free_page(GFP_TEMPORARY); if (!page) goto out; length = -EFAULT; if (copy_from_user(page, buf, count)) goto out_free; /* Guard against adverse ptrace interaction */ length = mutex_lock_interruptible(&task->cred_guard_mutex); if (length < 0) goto out_free; length = security_setprocattr(task, (char*)file->f_path.dentry->d_name.name, (void*)page, count); mutex_unlock(&task->cred_guard_mutex); out_free: free_page((unsigned long) page); out: put_task_struct(task); out_no_task: return length; } static const struct file_operations proc_pid_attr_operations = { .read = proc_pid_attr_read, .write = proc_pid_attr_write, }; static const struct pid_entry attr_dir_stuff[] = { REG("current", S_IRUGO|S_IWUGO, proc_pid_attr_operations), REG("prev", S_IRUGO, proc_pid_attr_operations), REG("exec", S_IRUGO|S_IWUGO, proc_pid_attr_operations), REG("fscreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), REG("keycreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), REG("sockcreate", S_IRUGO|S_IWUGO, proc_pid_attr_operations), }; static int proc_attr_dir_readdir(struct file * filp, void * dirent, filldir_t filldir) { return proc_pident_readdir(filp,dirent,filldir, attr_dir_stuff,ARRAY_SIZE(attr_dir_stuff)); } static const struct file_operations proc_attr_dir_operations = { .read = generic_read_dir, .readdir = proc_attr_dir_readdir, }; static struct dentry *proc_attr_dir_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { return proc_pident_lookup(dir, dentry, attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff)); } static const struct inode_operations 
proc_attr_dir_inode_operations = { .lookup = proc_attr_dir_lookup, .getattr = pid_getattr, .setattr = proc_setattr, }; #endif #ifdef CONFIG_ELF_CORE static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task = get_proc_task(file->f_dentry->d_inode); struct mm_struct *mm; char buffer[PROC_NUMBUF]; size_t len; int ret; if (!task) return -ESRCH; ret = 0; mm = get_task_mm(task); if (mm) { len = snprintf(buffer, sizeof(buffer), "%08lx\n", ((mm->flags & MMF_DUMP_FILTER_MASK) >> MMF_DUMP_FILTER_SHIFT)); mmput(mm); ret = simple_read_from_buffer(buf, count, ppos, buffer, len); } put_task_struct(task); return ret; } static ssize_t proc_coredump_filter_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct task_struct *task; struct mm_struct *mm; char buffer[PROC_NUMBUF], *end; unsigned int val; int ret; int i; unsigned long mask; ret = -EFAULT; memset(buffer, 0, sizeof(buffer)); if (count > sizeof(buffer) - 1) count = sizeof(buffer) - 1; if (copy_from_user(buffer, buf, count)) goto out_no_task; ret = -EINVAL; val = (unsigned int)simple_strtoul(buffer, &end, 0); if (*end == '\n') end++; if (end - buffer == 0) goto out_no_task; ret = -ESRCH; task = get_proc_task(file->f_dentry->d_inode); if (!task) goto out_no_task; ret = end - buffer; mm = get_task_mm(task); if (!mm) goto out_no_mm; for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { if (val & mask) set_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags); else clear_bit(i + MMF_DUMP_FILTER_SHIFT, &mm->flags); } mmput(mm); out_no_mm: put_task_struct(task); out_no_task: return ret; } static const struct file_operations proc_coredump_filter_operations = { .read = proc_coredump_filter_read, .write = proc_coredump_filter_write, }; #endif /* * /proc/self: */ static int proc_self_readlink(struct dentry *dentry, char __user *buffer, int buflen) { struct pid_namespace *ns = dentry->d_sb->s_fs_info; pid_t tgid = 
task_tgid_nr_ns(current, ns); char tmp[PROC_NUMBUF]; if (!tgid) return -ENOENT; sprintf(tmp, "%d", tgid); return vfs_readlink(dentry,buffer,buflen,tmp); } static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd) { struct pid_namespace *ns = dentry->d_sb->s_fs_info; pid_t tgid = task_tgid_nr_ns(current, ns); char tmp[PROC_NUMBUF]; if (!tgid) return ERR_PTR(-ENOENT); sprintf(tmp, "%d", task_tgid_nr_ns(current, ns)); return ERR_PTR(vfs_follow_link(nd,tmp)); } static const struct inode_operations proc_self_inode_operations = { .readlink = proc_self_readlink, .follow_link = proc_self_follow_link, }; /* * proc base * * These are the directory entries in the root directory of /proc * that properly belong to the /proc filesystem, as they describe * describe something that is process related. */ static const struct pid_entry proc_base_stuff[] = { NOD("self", S_IFLNK|S_IRWXUGO, &proc_self_inode_operations, NULL, {}), }; /* * Exceptional case: normally we are not allowed to unhash a busy * directory. In this case, however, we can do it - no aliasing problems * due to the way we treat inodes. 
 */
static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *task = get_proc_task(inode);
	if (task) {
		put_task_struct(task);
		return 1;
	}
	d_drop(dentry);
	return 0;
}

static const struct dentry_operations proc_base_dentry_operations =
{
	.d_revalidate	= proc_base_revalidate,
	.d_delete	= pid_delete_dentry,
};

/* Create an inode for a proc_base_stuff entry (e.g. /proc/self). */
static struct dentry *proc_base_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	const struct pid_entry *p = ptr;
	struct inode *inode;
	struct proc_inode *ei;
	struct dentry *error = ERR_PTR(-EINVAL);

	/* Allocate the inode */
	error = ERR_PTR(-ENOMEM);
	inode = new_inode(dir->i_sb);
	if (!inode)
		goto out;

	/* Initialize the inode */
	ei = PROC_I(inode);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	/*
	 * grab the reference to the task.
	 */
	ei->pid = get_task_pid(task, PIDTYPE_PID);
	if (!ei->pid)
		goto out_iput;

	inode->i_mode = p->mode;
	if (S_ISDIR(inode->i_mode))
		inode->i_nlink = 2;
	if (S_ISLNK(inode->i_mode))
		inode->i_size = 64;
	if (p->iop)
		inode->i_op = p->iop;
	if (p->fop)
		inode->i_fop = p->fop;
	ei->op = p->op;
	dentry->d_op = &proc_base_dentry_operations;
	d_add(dentry, inode);
	error = NULL;
out:
	return error;
out_iput:
	iput(inode);
	goto out;
}

static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
{
	struct dentry *error;
	struct task_struct *task = get_proc_task(dir);
	const struct pid_entry *p, *last;

	error = ERR_PTR(-ENOENT);

	if (!task)
		goto out_no_task;

	/* Lookup the directory entry */
	last = &proc_base_stuff[ARRAY_SIZE(proc_base_stuff) - 1];
	for (p = proc_base_stuff; p <= last; p++) {
		if (p->len != dentry->d_name.len)
			continue;
		if (!memcmp(dentry->d_name.name, p->name, p->len))
			break;
	}
	if (p > last)
		goto out;

	error = proc_base_instantiate(dir, dentry, task, p);

out:
	put_task_struct(task);
out_no_task:
	return error;
}

static int proc_base_fill_cache(struct file *filp, void *dirent,
	filldir_t filldir, struct task_struct *task, const struct pid_entry *p)
{
	return proc_fill_cache(filp, dirent, filldir, p->name, p->len,
				proc_base_instantiate, task, p);
}

#ifdef CONFIG_TASK_IO_ACCOUNTING
/*
 * Format I/O accounting counters for /proc/<pid>/io.  When @whole is set,
 * sum the counters over the entire thread group (under sighand lock).
 */
static int do_io_accounting(struct task_struct *task, char *buffer, int whole)
{
	struct task_io_accounting acct = task->ioac;
	unsigned long flags;

	if (whole && lock_task_sighand(task, &flags)) {
		struct task_struct *t = task;

		task_io_accounting_add(&acct, &task->signal->ioac);
		while_each_thread(task, t)
			task_io_accounting_add(&acct, &t->ioac);

		unlock_task_sighand(task, &flags);
	}
	return sprintf(buffer,
			"rchar: %llu\n"
			"wchar: %llu\n"
			"syscr: %llu\n"
			"syscw: %llu\n"
			"read_bytes: %llu\n"
			"write_bytes: %llu\n"
			"cancelled_write_bytes: %llu\n",
			(unsigned long long)acct.rchar,
			(unsigned long long)acct.wchar,
			(unsigned long long)acct.syscr,
			(unsigned long long)acct.syscw,
			(unsigned long long)acct.read_bytes,
			(unsigned long long)acct.write_bytes,
			(unsigned long long)acct.cancelled_write_bytes);
}

static int proc_tid_io_accounting(struct task_struct *task, char *buffer)
{
	return do_io_accounting(task, buffer, 0);
}

static int proc_tgid_io_accounting(struct task_struct *task, char *buffer)
{
	return do_io_accounting(task, buffer, 1);
}
#endif /* CONFIG_TASK_IO_ACCOUNTING */

static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
				struct pid *pid, struct task_struct *task)
{
	seq_printf(m, "%08x\n", task->personality);
	return 0;
}

/*
 * Thread groups
 */
static const struct file_operations proc_task_operations;
static const struct inode_operations proc_task_inode_operations;

static const struct pid_entry tgid_base_stuff[] = {
	DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
	DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
	DIR("fdinfo",     S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
#ifdef CONFIG_NET
	DIR("net",        S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
#endif
	REG("environ",    S_IRUSR, proc_environ_operations),
	INF("auxv",       S_IRUSR, proc_pid_auxv),
	ONE("status",     S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	INF("limits",	  S_IRUSR, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",      S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
	REG("comm",       S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	INF("syscall",    S_IRUSR, proc_pid_syscall),
#endif
	INF("cmdline",    S_IRUGO, proc_pid_cmdline),
	ONE("stat",       S_IRUGO, proc_tgid_stat),
	ONE("statm",      S_IRUGO, proc_pid_statm),
	REG("maps",       S_IRUGO, proc_maps_operations),
#ifdef CONFIG_NUMA
	REG("numa_maps",  S_IRUGO, proc_numa_maps_operations),
#endif
	REG("mem",        S_IRUSR|S_IWUSR, proc_mem_operations),
	LNK("cwd",        proc_cwd_link),
	LNK("root",       proc_root_link),
	LNK("exe",        proc_exe_link),
	REG("mounts",     S_IRUGO, proc_mounts_operations),
	REG("mountinfo",  S_IRUGO, proc_mountinfo_operations),
	REG("mountstats", S_IRUSR, proc_mountstats_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
	REG("smaps",      S_IRUGO, proc_smaps_operations),
	REG("pagemap",    S_IRUSR, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",       S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
#ifdef CONFIG_KALLSYMS
	INF("wchan",      S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
	ONE("stack",      S_IRUSR, proc_pid_stack),
#endif
#ifdef CONFIG_SCHEDSTATS
	INF("schedstat",  S_IRUGO, proc_pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",    S_IRUGO, proc_lstats_operations),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	REG("cpuset",     S_IRUGO, proc_cpuset_operations),
#endif
#ifdef CONFIG_CGROUPS
	REG("cgroup",     S_IRUGO, proc_cgroup_operations),
#endif
	INF("oom_score",  S_IRUGO, proc_oom_score),
	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
#ifdef CONFIG_ELF_CORE
	REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	INF("io",	S_IRUGO, proc_tgid_io_accounting),
#endif
};

static int proc_tgid_base_readdir(struct file * filp,
			     void * dirent, filldir_t filldir)
{
	return proc_pident_readdir(filp,dirent,filldir,
				   tgid_base_stuff,ARRAY_SIZE(tgid_base_stuff));
}

static const struct file_operations proc_tgid_base_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_tgid_base_readdir,
};

static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
	return proc_pident_lookup(dir, dentry,
				  tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}

static const struct inode_operations proc_tgid_base_inode_operations = {
	.lookup		= proc_tgid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};

/*
 * Drop the dentries for /proc/<pid> and /proc/<tgid>/task/<pid> on one
 * proc mount, shrinking their subtrees from the dcache.
 */
static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
{
	struct dentry *dentry, *leader, *dir;
	char buf[PROC_NUMBUF];
	struct qstr name;

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%d", pid);
	dentry = d_hash_and_lookup(mnt->mnt_root, &name);
	if (dentry) {
		shrink_dcache_parent(dentry);
		d_drop(dentry);
		dput(dentry);
	}

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%d", tgid);
	leader = d_hash_and_lookup(mnt->mnt_root, &name);
	if (!leader)
		goto out;

	name.name = "task";
	name.len = strlen(name.name);
	dir = d_hash_and_lookup(leader, &name);
	if (!dir)
		goto out_put_leader;

	name.name = buf;
	name.len = snprintf(buf, sizeof(buf), "%d", pid);
	dentry = d_hash_and_lookup(dir, &name);
	if (dentry) {
		shrink_dcache_parent(dentry);
		d_drop(dentry);
		dput(dentry);
	}

	dput(dir);
out_put_leader:
	dput(leader);
out:
	return;
}

/**
 * proc_flush_task -  Remove dcache entries for @task from the /proc dcache.
 * @task: task that should be flushed.
*
 * When flushing dentries from proc, one needs to flush them from global
 * proc (proc_mnt) and from all the namespaces' procs this task was seen
 * in. This call is supposed to do all of this job.
 *
 * Looks in the dcache for
 * /proc/@pid
 * /proc/@tgid/task/@pid
 * if either directory is present flushes it and all of its children
 * from the dcache.
 *
 * It is safe and reasonable to cache /proc entries for a task until
 * that task exits. After that they just clog up the dcache with
 * useless entries, possibly causing useful dcache entries to be
 * flushed instead. This routine is proven to flush those useless
 * dcache entries at process exit time.
 *
 * NOTE: This routine is just an optimization so it does not guarantee
 *       that no dcache entries will exist at process exit time it
 *       just makes it very unlikely that any will persist.
 */
void proc_flush_task(struct task_struct *task)
{
	int i;
	struct pid *pid, *tgid;
	struct upid *upid;

	pid = task_pid(task);
	tgid = task_tgid(task);

	/* Flush the cached /proc entries for this task in every pid
	 * namespace level it is visible in. */
	for (i = 0; i <= pid->level; i++) {
		upid = &pid->numbers[i];
		proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
			tgid->numbers[i].nr);
	}

	/* If this was pid 1 of its namespace, the namespace's proc
	 * mount can be released now. */
	upid = &pid->numbers[pid->level];
	if (upid->nr == 1)
		pid_ns_release_proc(upid->ns);
}

/*
 * Build the inode + dentry for a /proc/<tgid> directory.
 * Returns NULL on success (dentry instantiated), ERR_PTR(-ENOENT)
 * on failure or if the task died before we could validate.
 */
static struct dentry *proc_pid_instantiate(struct inode *dir,
	struct dentry * dentry,
	struct task_struct *task, const void *ptr)
{
	struct dentry *error = ERR_PTR(-ENOENT);
	struct inode *inode;

	inode = proc_pid_make_inode(dir->i_sb, task);
	if (!inode)
		goto out;

	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tgid_base_inode_operations;
	inode->i_fop = &proc_tgid_base_operations;
	inode->i_flags|=S_IMMUTABLE;

	/* "." and ".." plus one link per subdirectory entry. */
	inode->i_nlink = 2 + pid_entry_count_dirs(tgid_base_stuff,
		ARRAY_SIZE(tgid_base_stuff));

	dentry->d_op = &pid_dentry_operations;

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, NULL))
		error = NULL;
out:
	return error;
}

/*
 * Lookup of a name directly under /proc: first try the non-process
 * ("base") entries, then interpret the name as a decimal tgid in the
 * mount's pid namespace.
 */
struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
	struct dentry *result = ERR_PTR(-ENOENT);
	struct task_struct *task;
	unsigned tgid;
	struct pid_namespace *ns;

	result = proc_base_lookup(dir, dentry);
	if (!IS_ERR(result) || PTR_ERR(result) != -ENOENT)
		goto out;

	/* name_to_int() returns ~0U for anything that is not a
	 * well-formed decimal number. */
	tgid = name_to_int(dentry);
	if (tgid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tgid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;

	result = proc_pid_instantiate(dir, dentry, task, NULL);
	put_task_struct(task);
out:
	return result;
}

/*
 * Find the first task with tgid >= tgid
 *
 */
struct tgid_iter {
	unsigned int tgid;
	struct task_struct *task;
};
/* Advance the iterator to the next thread-group leader with
 * tgid >= iter.tgid in namespace @ns.  Drops the reference on the
 * previous iter.task and returns a new reference (or iter.task == NULL
 * at end). */
static struct tgid_iter next_tgid(struct pid_namespace *ns, struct tgid_iter iter)
{
	struct pid *pid;

	if (iter.task)
		put_task_struct(iter.task);
	rcu_read_lock();
retry:
	iter.task = NULL;
	pid = find_ge_pid(iter.tgid, ns);
	if (pid) {
		iter.tgid = pid_nr_ns(pid, ns);
		iter.task = pid_task(pid, PIDTYPE_PID);
		/* What we want to know is if the pid we have found is the
		 * pid of a thread_group_leader.  Testing for task
		 * being a thread_group_leader is the obvious thing
		 * todo but there is a window when it fails, due to
		 * the pid transfer logic in de_thread.
		 *
		 * So we perform the straight forward test of seeing
		 * if the pid we have found is the pid of a thread
		 * group leader, and don't worry if the task we have
		 * found doesn't happen to be a thread group leader.
		 * As we don't care in the case of readdir.
		 */
		if (!iter.task || !has_group_leader_pid(iter.task)) {
			iter.tgid += 1;
			goto retry;
		}
		get_task_struct(iter.task);
	}
	rcu_read_unlock();
	return iter;
}

#define TGID_OFFSET (FIRST_PROCESS_ENTRY + ARRAY_SIZE(proc_base_stuff))

/* Emit one /proc/<tgid> directory entry for the current iterator
 * position via filldir. */
static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	struct tgid_iter iter)
{
	char name[PROC_NUMBUF];
	int len = snprintf(name, sizeof(name), "%d", iter.tgid);
	return proc_fill_cache(filp, dirent, filldir, name, len,
				proc_pid_instantiate, iter.task, NULL);
}

/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
	struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
	struct tgid_iter iter;
	struct pid_namespace *ns;

	if (!reaper)
		goto out_no_task;

	/* First the fixed, non-process entries (self, ...). */
	for (; nr < ARRAY_SIZE(proc_base_stuff); filp->f_pos++, nr++) {
		const struct pid_entry *p = &proc_base_stuff[nr];
		if (proc_base_fill_cache(filp, dirent, filldir, reaper, p) < 0)
			goto out;
	}

	/* Then one directory per thread-group leader; f_pos encodes the
	 * tgid so a partial readdir can resume where it stopped. */
	ns = filp->f_dentry->d_sb->s_fs_info;
	iter.task = NULL;
	iter.tgid = filp->f_pos - TGID_OFFSET;
	for (iter = next_tgid(ns, iter);
	     iter.task;
	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
		filp->f_pos = iter.tgid + TGID_OFFSET;
		if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
			put_task_struct(iter.task);
			goto out;
		}
	}
	filp->f_pos = PID_MAX_LIMIT + TGID_OFFSET;
out:
	put_task_struct(reaper);
out_no_task:
	return 0;
}

/*
 * Tasks
 */
static const struct pid_entry tid_base_stuff[] = {
	DIR("fd",        S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
	DIR("fdinfo",    S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fd_operations),
	REG("environ",   S_IRUSR, proc_environ_operations),
	INF("auxv",      S_IRUSR, proc_pid_auxv),
	ONE("status",    S_IRUGO, proc_pid_status),
	ONE("personality", S_IRUSR, proc_pid_personality),
	INF("limits",    S_IRUSR, proc_pid_limits),
#ifdef CONFIG_SCHED_DEBUG
	REG("sched",     S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
	REG("comm",      S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
	INF("syscall",   S_IRUSR, proc_pid_syscall),
#endif
	INF("cmdline",   S_IRUGO, proc_pid_cmdline),
	ONE("stat",      S_IRUGO, proc_tid_stat),
	ONE("statm",     S_IRUGO, proc_pid_statm),
	REG("maps",      S_IRUGO, proc_maps_operations),
#ifdef CONFIG_NUMA
	REG("numa_maps", S_IRUGO, proc_numa_maps_operations),
#endif
	REG("mem",       S_IRUSR|S_IWUSR, proc_mem_operations),
	LNK("cwd",       proc_cwd_link),
	LNK("root",      proc_root_link),
	LNK("exe",       proc_exe_link),
	REG("mounts",    S_IRUGO, proc_mounts_operations),
	REG("mountinfo", S_IRUGO, proc_mountinfo_operations),
#ifdef CONFIG_PROC_PAGE_MONITOR
	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
	REG("smaps",     S_IRUGO, proc_smaps_operations),
	REG("pagemap",   S_IRUSR, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
	DIR("attr",      S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
#endif
#ifdef CONFIG_KALLSYMS
	INF("wchan",     S_IRUGO, proc_pid_wchan),
#endif
#ifdef CONFIG_STACKTRACE
	ONE("stack",     S_IRUSR, proc_pid_stack),
#endif
#ifdef CONFIG_SCHEDSTATS
	INF("schedstat", S_IRUGO, proc_pid_schedstat),
#endif
#ifdef CONFIG_LATENCYTOP
	REG("latency",   S_IRUGO, proc_lstats_operations),
#endif
#ifdef CONFIG_PROC_PID_CPUSET
	REG("cpuset",    S_IRUGO, proc_cpuset_operations),
#endif
#ifdef CONFIG_CGROUPS
	REG("cgroup",    S_IRUGO, proc_cgroup_operations),
#endif
	INF("oom_score", S_IRUGO, proc_oom_score),
	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
#ifdef CONFIG_AUDITSYSCALL
	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
	REG("sessionid", S_IRUSR, proc_sessionid_operations),
#endif
#ifdef CONFIG_FAULT_INJECTION
	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
#endif
#ifdef CONFIG_TASK_IO_ACCOUNTING
	INF("io",        S_IRUGO, proc_tid_io_accounting),
#endif
};

/* readdir for the per-thread /proc/<tgid>/task/<tid> directory. */
static int proc_tid_base_readdir(struct file * filp,
			     void * dirent, filldir_t filldir)
{
	return proc_pident_readdir(filp,dirent,filldir,
				   tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
}

/* lookup for the per-thread /proc/<tgid>/task/<tid> directory. */
static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
	return proc_pident_lookup(dir, dentry,
				  tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}

static const struct file_operations proc_tid_base_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_tid_base_readdir,
};

static const struct inode_operations proc_tid_base_inode_operations = {
	.lookup		= proc_tid_base_lookup,
	.getattr	= pid_getattr,
	.setattr	= proc_setattr,
};

/*
 * Build the inode + dentry for a /proc/<tgid>/task/<tid> directory.
 * Mirrors proc_pid_instantiate() but wires up the tid (per-thread)
 * operations tables.
 */
static struct dentry *proc_task_instantiate(struct inode *dir,
	struct dentry *dentry, struct task_struct *task, const void *ptr)
{
	struct dentry *error = ERR_PTR(-ENOENT);
	struct inode *inode;
	inode = proc_pid_make_inode(dir->i_sb, task);

	if (!inode)
		goto out;
	inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
	inode->i_op = &proc_tid_base_inode_operations;
	inode->i_fop = &proc_tid_base_operations;
	inode->i_flags|=S_IMMUTABLE;

	/* "." and ".." plus one link per subdirectory entry. */
	inode->i_nlink = 2 + pid_entry_count_dirs(tid_base_stuff,
		ARRAY_SIZE(tid_base_stuff));

	dentry->d_op = &pid_dentry_operations;

	d_add(dentry, inode);
	/* Close the race of the process dying before we return the dentry */
	if (pid_revalidate(dentry, NULL))
		error = NULL;
out:
	return error;
}

/*
 * Lookup a tid under /proc/<tgid>/task/.  The tid must name a thread
 * in the same thread group as the directory's leader.
 */
static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
{
	struct dentry *result = ERR_PTR(-ENOENT);
	struct task_struct *task;
	struct task_struct *leader = get_proc_task(dir);
	unsigned tid;
	struct pid_namespace *ns;

	if (!leader)
		goto out_no_task;

	tid = name_to_int(dentry);
	if (tid == ~0U)
		goto out;

	ns = dentry->d_sb->s_fs_info;
	rcu_read_lock();
	task = find_task_by_pid_ns(tid, ns);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task)
		goto out;
	if (!same_thread_group(leader, task))
		goto out_drop_task;

	result = proc_task_instantiate(dir, dentry, task, NULL);
out_drop_task:
	put_task_struct(task);
out:
	put_task_struct(leader);
out_no_task:
	return result;
}

/*
 * Find the first tid of a thread group to return to user space.
 *
 * Usually this is just the thread group leader, but if the users
 * buffer was too small or there was a seek into the middle of the
 * directory we have more work todo.
 *
 * In the case of a short read we start with find_task_by_pid.
 *
 * In the case of a seek we start with the leader and walk nr
 * threads past it.
 */
static struct task_struct *first_tid(struct task_struct *leader,
		int tid, int nr, struct pid_namespace *ns)
{
	struct task_struct *pos;

	rcu_read_lock();
	/* Attempt to start with the pid of a thread */
	if (tid && (nr > 0)) {
		pos = find_task_by_pid_ns(tid, ns);
		if (pos && (pos->group_leader == leader))
			goto found;
	}

	/* If nr exceeds the number of threads there is nothing todo */
	pos = NULL;
	if (nr && nr >= get_nr_threads(leader))
		goto out;

	/* If we haven't found our starting place yet start
	 * with the leader and walk nr threads forward.
	 */
	for (pos = leader; nr > 0; --nr) {
		pos = next_thread(pos);
		if (pos == leader) {
			pos = NULL;
			goto out;
		}
	}
found:
	get_task_struct(pos);
out:
	rcu_read_unlock();
	return pos;
}

/*
 * Find the next thread in the thread list.
 * Return NULL if there is an error or no next thread.
 *
 * The reference to the input task_struct is released.
 */
static struct task_struct *next_tid(struct task_struct *start)
{
	struct task_struct *pos = NULL;
	rcu_read_lock();
	if (pid_alive(start)) {
		pos = next_thread(start);
		/* Wrapping back to the leader means we've seen every
		 * thread in the group. */
		if (thread_group_leader(pos))
			pos = NULL;
		else
			get_task_struct(pos);
	}
	rcu_read_unlock();
	put_task_struct(start);
	return pos;
}

/* Emit one /proc/<tgid>/task/<tid> directory entry via filldir. */
static int proc_task_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
	struct task_struct *task, int tid)
{
	char name[PROC_NUMBUF];
	int len = snprintf(name, sizeof(name), "%d", tid);
	return proc_fill_cache(filp, dirent, filldir, name, len,
				proc_task_instantiate, task, NULL);
}

/* for the /proc/TGID/task/ directories */
static int proc_task_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	struct dentry *dentry = filp->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct task_struct *leader = NULL;
	struct task_struct *task;
	int retval = -ENOENT;
	ino_t ino;
	int tid;
	struct pid_namespace *ns;

	task = get_proc_task(inode);
	if (!task)
		goto out_no_task;
	rcu_read_lock();
	if (pid_alive(task)) {
		leader = task->group_leader;
		get_task_struct(leader);
	}
	rcu_read_unlock();
	put_task_struct(task);
	if (!leader)
		goto out_no_task;
	retval = 0;

	switch ((unsigned long)filp->f_pos) {
	case 0:
		ino = inode->i_ino;
		if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) < 0)
			goto out;
		filp->f_pos++;
		/* fall through */
	case 1:
		ino = parent_ino(dentry);
		if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) < 0)
			goto out;
		filp->f_pos++;
		/* fall through */
	}

	/* f_version caches the tgid value that the last readdir call couldn't
	 * return. lseek aka telldir automagically resets f_version to 0.
	 */
	ns = filp->f_dentry->d_sb->s_fs_info;
	tid = (int)filp->f_version;
	filp->f_version = 0;
	for (task = first_tid(leader, tid, filp->f_pos - 2, ns);
	     task;
	     task = next_tid(task), filp->f_pos++) {
		tid = task_pid_nr_ns(task, ns);
		if (proc_task_fill_cache(filp, dirent, filldir, task, tid) < 0) {
			/* returning this tgid failed, save it as the first
			 * pid for the next readdir call */
			filp->f_version = (u64)tid;
			put_task_struct(task);
			break;
		}
	}
out:
	put_task_struct(leader);
out_no_task:
	return retval;
}

/* getattr for /proc/<tgid>/task: nlink reflects the live thread
 * count.  Always returns 0, even if the task is gone. */
static int proc_task_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct task_struct *p = get_proc_task(inode);
	generic_fillattr(inode, stat);

	if (p) {
		stat->nlink += get_nr_threads(p);
		put_task_struct(p);
	}

	return 0;
}

static const struct inode_operations proc_task_inode_operations = {
	.lookup		= proc_task_lookup,
	.getattr	= proc_task_getattr,
	.setattr	= proc_setattr,
};

static const struct file_operations proc_task_operations = {
	.read		= generic_read_dir,
	.readdir	= proc_task_readdir,
};
./CrossVul/dataset_final_sorted/CWE-20/c/good_2035_1
crossvul-cpp_data_bad_5066_0
/*-
 * Copyright (c) 2003-2007 Tim Kientzle
 * Copyright (c) 2010-2012 Michihiro NAKAJIMA
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Reader for the cpio family of archive formats: odc (POSIX.1),
 * SVR4 newc (with and without CRC), afio "large ASCII", and both
 * byte orders of the old binary format. */

#include "archive_platform.h"
__FBSDID("$FreeBSD: head/lib/libarchive/archive_read_support_format_cpio.c 201163 2009-12-29 05:50:34Z kientzle $");

#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
/* #include <stdint.h> */ /* See archive_platform.h */
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifdef HAVE_STRING_H
#include <string.h>
#endif

#include "archive.h"
#include "archive_entry.h"
#include "archive_entry_locale.h"
#include "archive_private.h"
#include "archive_read_private.h"

/* Field layout of the old binary cpio header (byte offsets/sizes). */
#define	bin_magic_offset 0
#define	bin_magic_size 2
#define	bin_dev_offset 2
#define	bin_dev_size 2
#define	bin_ino_offset 4
#define	bin_ino_size 2
#define	bin_mode_offset 6
#define	bin_mode_size 2
#define	bin_uid_offset 8
#define	bin_uid_size 2
#define	bin_gid_offset 10
#define	bin_gid_size 2
#define	bin_nlink_offset 12
#define	bin_nlink_size 2
#define	bin_rdev_offset 14
#define	bin_rdev_size 2
#define	bin_mtime_offset 16
#define	bin_mtime_size 4
#define	bin_namesize_offset 20
#define	bin_namesize_size 2
#define	bin_filesize_offset 22
#define	bin_filesize_size 4
#define	bin_header_size 26

/* Field layout of the odc (POSIX.1 octet-oriented) header; all octal. */
#define	odc_magic_offset 0
#define	odc_magic_size 6
#define	odc_dev_offset 6
#define	odc_dev_size 6
#define	odc_ino_offset 12
#define	odc_ino_size 6
#define	odc_mode_offset 18
#define	odc_mode_size 6
#define	odc_uid_offset 24
#define	odc_uid_size 6
#define	odc_gid_offset 30
#define	odc_gid_size 6
#define	odc_nlink_offset 36
#define	odc_nlink_size 6
#define	odc_rdev_offset 42
#define	odc_rdev_size 6
#define	odc_mtime_offset 48
#define	odc_mtime_size 11
#define	odc_namesize_offset 59
#define	odc_namesize_size 6
#define	odc_filesize_offset 65
#define	odc_filesize_size 11
#define	odc_header_size 76

/* Field layout of the SVR4 "newc" header; all hexadecimal. */
#define	newc_magic_offset 0
#define	newc_magic_size 6
#define	newc_ino_offset 6
#define	newc_ino_size 8
#define	newc_mode_offset 14
#define	newc_mode_size 8
#define	newc_uid_offset 22
#define	newc_uid_size 8
#define	newc_gid_offset 30
#define	newc_gid_size 8
#define	newc_nlink_offset 38
#define	newc_nlink_size 8
#define	newc_mtime_offset 46
#define	newc_mtime_size 8
#define	newc_filesize_offset 54
#define	newc_filesize_size 8
#define	newc_devmajor_offset 62
#define	newc_devmajor_size 8
#define	newc_devminor_offset 70
#define	newc_devminor_size 8
#define	newc_rdevmajor_offset 78
#define	newc_rdevmajor_size 8
#define	newc_rdevminor_offset 86
#define	newc_rdevminor_size 8
#define	newc_namesize_offset 94
#define	newc_namesize_size 8
#define	newc_checksum_offset 102
#define	newc_checksum_size 8
#define	newc_header_size 110

/*
 * An afio large ASCII header, which they named itself.
 * afio utility uses this header, if a file size is larger than 2G bytes
 * or inode/uid/gid is bigger than 65535(0xFFFF) or mtime is bigger than
 * 0x7fffffff, which we cannot record to odc header because of its limit.
 * If not, uses odc header.
 */
#define	afiol_magic_offset 0
#define	afiol_magic_size 6
#define	afiol_dev_offset 6
#define	afiol_dev_size 8	/* hex */
#define	afiol_ino_offset 14
#define	afiol_ino_size 16	/* hex */
#define	afiol_ino_m_offset 30	/* 'm' */
#define	afiol_mode_offset 31
#define	afiol_mode_size 6	/* oct */
#define	afiol_uid_offset 37
#define	afiol_uid_size 8	/* hex */
#define	afiol_gid_offset 45
#define	afiol_gid_size 8	/* hex */
#define	afiol_nlink_offset 53
#define	afiol_nlink_size 8	/* hex */
#define	afiol_rdev_offset 61
#define	afiol_rdev_size 8	/* hex */
#define	afiol_mtime_offset 69
#define	afiol_mtime_size 16	/* hex */
#define	afiol_mtime_n_offset 85	/* 'n' */
#define	afiol_namesize_offset 86
#define	afiol_namesize_size 4	/* hex */
#define	afiol_flag_offset 90
#define	afiol_flag_size 4	/* hex */
#define	afiol_xsize_offset 94
#define	afiol_xsize_size 4	/* hex */
#define	afiol_xsize_s_offset 98	/* 's' */
#define	afiol_filesize_offset 99
#define	afiol_filesize_size 16	/* hex */
#define	afiol_filesize_c_offset 115	/* ':' */
#define	afiol_header_size 116

/* Node of the doubly-linked list used to detect hardlinks by
 * (dev, ino) across entries. */
struct links_entry {
	struct links_entry	*next;
	struct links_entry	*previous;
	int			 links;
	dev_t			 dev;
	int64_t			 ino;
	char			*name;
};

#define	CPIO_MAGIC   0x13141516
/* Per-archive reader state; `magic` guards against stale pointers. */
struct cpio {
	int			  magic;
	int			(*read_header)(struct archive_read *, struct cpio *,
				     struct archive_entry *, size_t *, size_t *);
	struct links_entry	 *links_head;
	int64_t			  entry_bytes_remaining;
	int64_t			  entry_bytes_unconsumed;
	int64_t			  entry_offset;
	int64_t			  entry_padding;

	struct archive_string_conv *opt_sconv;
	struct archive_string_conv *sconv_default;
	int			  init_default_conversion;
};

static int64_t	atol16(const char *, unsigned);
static int64_t	atol8(const char *, unsigned);
static int	archive_read_format_cpio_bid(struct archive_read *, int);
static int	archive_read_format_cpio_options(struct archive_read *,
		    const char *, const char *);
static int	archive_read_format_cpio_cleanup(struct archive_read *);
static int	archive_read_format_cpio_read_data(struct archive_read *,
		    const void **, size_t *, int64_t *);
static int	archive_read_format_cpio_read_header(struct archive_read *,
		    struct archive_entry *);
static int	archive_read_format_cpio_skip(struct archive_read *);
static int64_t	be4(const unsigned char *);
static int	find_odc_header(struct archive_read *);
static int	find_newc_header(struct archive_read *);
static int	header_bin_be(struct archive_read *, struct cpio *,
		    struct archive_entry *, size_t *, size_t *);
static int	header_bin_le(struct archive_read *, struct cpio *,
		    struct archive_entry *, size_t *, size_t *);
static int	header_newc(struct archive_read *, struct cpio *,
		    struct archive_entry *, size_t *, size_t *);
static int	header_odc(struct archive_read *, struct cpio *,
		    struct archive_entry *, size_t *, size_t *);
static int	header_afiol(struct archive_read *, struct cpio *,
		    struct archive_entry *, size_t *, size_t *);
static int	is_octal(const char *, size_t);
static int	is_hex(const char *, size_t);
static int64_t	le4(const unsigned char *);
static int	record_hardlink(struct archive_read *a,
		    struct cpio *cpio, struct archive_entry *entry);

/* Public entry point: register the cpio reader on an archive handle. */
int
archive_read_support_format_cpio(struct archive *_a)
{
	struct archive_read *a = (struct archive_read *)_a;
	struct cpio *cpio;
	int r;

	archive_check_magic(_a, ARCHIVE_READ_MAGIC,
	    ARCHIVE_STATE_NEW, "archive_read_support_format_cpio");

	cpio = (struct cpio *)calloc(1, sizeof(*cpio));
	if (cpio == NULL) {
		archive_set_error(&a->archive, ENOMEM, "Can't allocate cpio data");
		return (ARCHIVE_FATAL);
	}
	cpio->magic = CPIO_MAGIC;

	r = __archive_read_register_format(a,
	    cpio,
	    "cpio",
	    archive_read_format_cpio_bid,
	    archive_read_format_cpio_options,
	    archive_read_format_cpio_read_header,
	    archive_read_format_cpio_read_data,
	    archive_read_format_cpio_skip,
	    NULL,
	    archive_read_format_cpio_cleanup,
	    NULL,
	    NULL);

	if (r != ARCHIVE_OK)
		free(cpio);
	return (ARCHIVE_OK);
}

/* Bid on the stream by inspecting the 6-byte magic; also selects the
 * variant-specific header parser for later use. */
static int
archive_read_format_cpio_bid(struct archive_read *a, int best_bid)
{
	const unsigned char *p;
	struct cpio *cpio;
	int bid;

	(void)best_bid; /* UNUSED */

	cpio = (struct cpio *)(a->format->data);

	if ((p = __archive_read_ahead(a, 6, NULL)) == NULL)
		return (-1);

	bid = 0;
	if (memcmp(p, "070707", 6) == 0) {
		/* ASCII cpio archive (odc, POSIX.1) */
		cpio->read_header = header_odc;
		bid += 48;
		/*
		 * XXX TODO:  More verification; Could check that only octal
		 * digits appear in appropriate header locations. XXX
		 */
	} else if (memcmp(p, "070727", 6) == 0) {
		/* afio large ASCII cpio archive */
		cpio->read_header = header_odc;
		bid += 48;
		/*
		 * XXX TODO:  More verification; Could check that almost hex
		 * digits appear in appropriate header locations. XXX
		 */
	} else if (memcmp(p, "070701", 6) == 0) {
		/* ASCII cpio archive (SVR4 without CRC) */
		cpio->read_header = header_newc;
		bid += 48;
		/*
		 * XXX TODO:  More verification; Could check that only hex
		 * digits appear in appropriate header locations. XXX
		 */
	} else if (memcmp(p, "070702", 6) == 0) {
		/* ASCII cpio archive (SVR4 with CRC) */
		/* XXX TODO: Flag that we should check the CRC. XXX */
		cpio->read_header = header_newc;
		bid += 48;
		/*
		 * XXX TODO:  More verification; Could check that only hex
		 * digits appear in appropriate header locations. XXX
		 */
	} else if (p[0] * 256 + p[1] == 070707) {
		/* big-endian binary cpio archives */
		cpio->read_header = header_bin_be;
		bid += 16;
		/* Is more verification possible here? */
	} else if (p[0] + p[1] * 256 == 070707) {
		/* little-endian binary cpio archives */
		cpio->read_header = header_bin_le;
		bid += 16;
		/* Is more verification possible here? */
	} else
		return (ARCHIVE_WARN);

	return (bid);
}

/* Handle reader options: "compat-2x" (libarchive 2.x filename
 * behavior) and "hdrcharset" (header character set). */
static int
archive_read_format_cpio_options(struct archive_read *a,
    const char *key, const char *val)
{
	struct cpio *cpio;
	int ret = ARCHIVE_FAILED;

	cpio = (struct cpio *)(a->format->data);
	if (strcmp(key, "compat-2x") == 0) {
		/* Handle filnames as libarchive 2.x */
		cpio->init_default_conversion = (val != NULL)?1:0;
		return (ARCHIVE_OK);
	} else if (strcmp(key, "hdrcharset") == 0) {
		if (val == NULL || val[0] == 0)
			archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
			    "cpio: hdrcharset option needs a character-set name");
		else {
			cpio->opt_sconv =
			    archive_string_conversion_from_charset(
				&a->archive, val, 0);
			if (cpio->opt_sconv != NULL)
				ret = ARCHIVE_OK;
			else
				ret = ARCHIVE_FATAL;
		}
		return (ret);
	}

	/* Note: The "warn" return is just to inform the options
	 * supervisor that we didn't handle it.  It will generate
	 * a suitable error if no one used this option. */
	return (ARCHIVE_WARN);
}

/* Read the next entry header: variant-specific fixed fields, then the
 * filename, then (for symlinks) the link target. */
static int
archive_read_format_cpio_read_header(struct archive_read *a,
    struct archive_entry *entry)
{
	struct cpio *cpio;
	const void *h;
	struct archive_string_conv *sconv;
	size_t namelength;
	size_t name_pad;
	int r;

	cpio = (struct cpio *)(a->format->data);
	sconv = cpio->opt_sconv;
	if (sconv == NULL) {
		if (!cpio->init_default_conversion) {
			cpio->sconv_default =
			    archive_string_default_conversion_for_read(
			      &(a->archive));
			cpio->init_default_conversion = 1;
		}
		sconv = cpio->sconv_default;
	}

	r = (cpio->read_header(a, cpio, entry, &namelength, &name_pad));

	if (r < ARCHIVE_WARN)
		return (r);

	/* Read name from buffer. */
	/* NOTE(review): namelength comes straight from the (untrusted)
	 * header with no upper bound or zero-length check before being
	 * handed to __archive_read_ahead — CWE-20; later versions
	 * validate this.  TODO: confirm against upstream fix. */
	h = __archive_read_ahead(a, namelength + name_pad, NULL);
	if (h == NULL)
	    return (ARCHIVE_FATAL);
	if (archive_entry_copy_pathname_l(entry,
	    (const char *)h, namelength, sconv) != 0) {
		if (errno == ENOMEM) {
			archive_set_error(&a->archive, ENOMEM,
			    "Can't allocate memory for Pathname");
			return (ARCHIVE_FATAL);
		}
		archive_set_error(&a->archive,
		    ARCHIVE_ERRNO_FILE_FORMAT,
		    "Pathname can't be converted from %s to current locale.",
		    archive_string_conversion_charset_name(sconv));
		r = ARCHIVE_WARN;
	}
	cpio->entry_offset = 0;

	__archive_read_consume(a, namelength + name_pad);

	/* If this is a symlink, read the link contents. */
	if (archive_entry_filetype(entry) == AE_IFLNK) {
		/* NOTE(review): entry_bytes_remaining is a 64-bit value
		 * from the header truncated to size_t with no sanity
		 * limit — an attacker-controlled size fed to
		 * read-ahead.  TODO: confirm against upstream fix. */
		h = __archive_read_ahead(a,
			(size_t)cpio->entry_bytes_remaining, NULL);
		if (h == NULL)
			return (ARCHIVE_FATAL);
		if (archive_entry_copy_symlink_l(entry, (const char *)h,
		    (size_t)cpio->entry_bytes_remaining, sconv) != 0) {
			if (errno == ENOMEM) {
				archive_set_error(&a->archive, ENOMEM,
				    "Can't allocate memory for Linkname");
				return (ARCHIVE_FATAL);
			}
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Linkname can't be converted from %s to "
			    "current locale.",
			    archive_string_conversion_charset_name(sconv));
			r = ARCHIVE_WARN;
		}
		__archive_read_consume(a, cpio->entry_bytes_remaining);
		cpio->entry_bytes_remaining = 0;
	}

	/* XXX TODO: If the full mode is 0160200, then this is a Solaris
	 * ACL description for the following entry.  Read this body
	 * and parse it as a Solaris-style ACL, then read the next
	 * header.  XXX */

	/* Compare name to "TRAILER!!!" to test for end-of-archive. */
	/* NOTE(review): `h` is dereferenced here after the name bytes were
	 * already consumed above (and `h` may even point at symlink data
	 * by now); strcmp also assumes NUL termination within the
	 * buffer.  Looks unsafe — verify against upstream. */
	if (namelength == 11 && strcmp((const char *)h, "TRAILER!!!") == 0) {
		/* TODO: Store file location of start of block. */
		archive_clear_error(&a->archive);
		return (ARCHIVE_EOF);
	}

	/* Detect and record hardlinks to previously-extracted entries. */
	if (record_hardlink(a, cpio, entry) != ARCHIVE_OK) {
		return (ARCHIVE_FATAL);
	}

	return (r);
}

/* Hand out the next span of entry body bytes (zero-copy from the
 * read-ahead buffer); returns ARCHIVE_EOF after consuming padding. */
static int
archive_read_format_cpio_read_data(struct archive_read *a,
    const void **buff, size_t *size, int64_t *offset)
{
	ssize_t bytes_read;
	struct cpio *cpio;

	cpio = (struct cpio *)(a->format->data);

	if (cpio->entry_bytes_unconsumed) {
		__archive_read_consume(a, cpio->entry_bytes_unconsumed);
		cpio->entry_bytes_unconsumed = 0;
	}

	if (cpio->entry_bytes_remaining > 0) {
		*buff = __archive_read_ahead(a, 1, &bytes_read);
		if (bytes_read <= 0)
			return (ARCHIVE_FATAL);
		if (bytes_read > cpio->entry_bytes_remaining)
			bytes_read = (ssize_t)cpio->entry_bytes_remaining;
		*size = bytes_read;
		cpio->entry_bytes_unconsumed = bytes_read;
		*offset = cpio->entry_offset;
		cpio->entry_offset += bytes_read;
		cpio->entry_bytes_remaining -= bytes_read;
		return (ARCHIVE_OK);
	} else {
		if (cpio->entry_padding !=
			__archive_read_consume(a, cpio->entry_padding)) {
			return (ARCHIVE_FATAL);
		}
		cpio->entry_padding = 0;
		*buff = NULL;
		*size = 0;
		*offset = cpio->entry_offset;
		return (ARCHIVE_EOF);
	}
}

/* Skip the remainder of the current entry (body + padding + any bytes
 * handed out but not yet consumed). */
static int
archive_read_format_cpio_skip(struct archive_read *a)
{
	struct cpio *cpio = (struct cpio *)(a->format->data);
	int64_t to_skip = cpio->entry_bytes_remaining + cpio->entry_padding +
		cpio->entry_bytes_unconsumed;

	if (to_skip != __archive_read_consume(a, to_skip)) {
		return (ARCHIVE_FATAL);
	}
	cpio->entry_bytes_remaining = 0;
	cpio->entry_padding = 0;
	cpio->entry_bytes_unconsumed = 0;
	return (ARCHIVE_OK);
}

/*
 * Skip forward to the next cpio newc header by searching for the
 * 07070[12] string.  This should be generalized and merged with
 * find_odc_header below.
 */
/* Return non-zero iff the first `len` bytes of `p` are hex digits. */
static int
is_hex(const char *p, size_t len)
{
	while (len-- > 0) {
		if ((*p >= '0' && *p <= '9')
		    || (*p >= 'a' && *p <= 'f')
		    || (*p >= 'A' && *p <= 'F'))
			++p;
		else
			return (0);
	}
	return (1);
}

/* Scan forward for the next plausible newc header; warns if any
 * garbage bytes had to be skipped. */
static int
find_newc_header(struct archive_read *a)
{
	const void *h;
	const char *p, *q;
	size_t skip, skipped = 0;
	ssize_t bytes;

	for (;;) {
		h = __archive_read_ahead(a, newc_header_size, &bytes);
		if (h == NULL)
			return (ARCHIVE_FATAL);
		p = h;
		q = p + bytes;

		/* Try the typical case first, then go into the slow search.*/
		if (memcmp("07070", p, 5) == 0
		    && (p[5] == '1' || p[5] == '2')
		    && is_hex(p, newc_header_size))
			return (ARCHIVE_OK);

		/*
		 * Scan ahead until we find something that looks
		 * like a newc header.
		 */
		while (p + newc_header_size <= q) {
			switch (p[5]) {
			case '1':
			case '2':
				if (memcmp("07070", p, 5) == 0
					&& is_hex(p, newc_header_size)) {
					skip = p - (const char *)h;
					__archive_read_consume(a, skip);
					skipped += skip;
					if (skipped > 0) {
						archive_set_error(&a->archive,
						    0,
						    "Skipped %d bytes before "
						    "finding valid header",
						    (int)skipped);
						return (ARCHIVE_WARN);
					}
					return (ARCHIVE_OK);
				}
				p += 2;
				break;
			case '0':
				p++;
				break;
			default:
				p += 6;
				break;
			}
		}
		skip = p - (const char *)h;
		__archive_read_consume(a, skip);
		skipped += skip;
	}
}

/* Parse a newc (SVR4) fixed header into `entry`; outputs the name
 * length and its alignment padding. */
static int
header_newc(struct archive_read *a, struct cpio *cpio,
    struct archive_entry *entry, size_t *namelength, size_t *name_pad)
{
	const void *h;
	const char *header;
	int r;

	r = find_newc_header(a);
	if (r < ARCHIVE_WARN)
		return (r);

	/* Read fixed-size portion of header. */
	h = __archive_read_ahead(a, newc_header_size, NULL);
	if (h == NULL)
	    return (ARCHIVE_FATAL);

	/* Parse out hex fields. */
	header = (const char *)h;

	if (memcmp(header + newc_magic_offset, "070701", 6) == 0) {
		a->archive.archive_format = ARCHIVE_FORMAT_CPIO_SVR4_NOCRC;
		a->archive.archive_format_name = "ASCII cpio (SVR4 with no CRC)";
	} else if (memcmp(header + newc_magic_offset, "070702", 6) == 0) {
		a->archive.archive_format = ARCHIVE_FORMAT_CPIO_SVR4_CRC;
		a->archive.archive_format_name = "ASCII cpio (SVR4 with CRC)";
	} else {
		/* TODO: Abort here? */
	}

	archive_entry_set_devmajor(entry,
		(dev_t)atol16(header + newc_devmajor_offset, newc_devmajor_size));
	archive_entry_set_devminor(entry,
		(dev_t)atol16(header + newc_devminor_offset, newc_devminor_size));
	archive_entry_set_ino(entry, atol16(header + newc_ino_offset, newc_ino_size));
	archive_entry_set_mode(entry,
		(mode_t)atol16(header + newc_mode_offset, newc_mode_size));
	archive_entry_set_uid(entry, atol16(header + newc_uid_offset, newc_uid_size));
	archive_entry_set_gid(entry, atol16(header + newc_gid_offset, newc_gid_size));
	archive_entry_set_nlink(entry,
		(unsigned int)atol16(header + newc_nlink_offset, newc_nlink_size));
	archive_entry_set_rdevmajor(entry,
		(dev_t)atol16(header + newc_rdevmajor_offset, newc_rdevmajor_size));
	archive_entry_set_rdevminor(entry,
		(dev_t)atol16(header + newc_rdevminor_offset, newc_rdevminor_size));
	archive_entry_set_mtime(entry, atol16(header + newc_mtime_offset, newc_mtime_size), 0);
	*namelength = (size_t)atol16(header + newc_namesize_offset, newc_namesize_size);
	/* Pad name to 2 more than a multiple of 4. */
	*name_pad = (2 - *namelength) & 3;
	/* NOTE(review): no zero-length or upper-bound check on
	 * *namelength here (untrusted hex field) — see read_header. */

	/*
	 * Note: entry_bytes_remaining is at least 64 bits and
	 * therefore guaranteed to be big enough for a 33-bit file
	 * size.
	 */
	cpio->entry_bytes_remaining =
	    atol16(header + newc_filesize_offset, newc_filesize_size);
	archive_entry_set_size(entry, cpio->entry_bytes_remaining);
	/* Pad file contents to a multiple of 4. */
	cpio->entry_padding = 3 & -cpio->entry_bytes_remaining;
	__archive_read_consume(a, newc_header_size);
	return (r);
}

/*
 * Skip forward to the next cpio odc header by searching for the
 * 070707 string.  This is a hand-optimized search that could
 * probably be easily generalized to handle all character-based
 * cpio variants.
 */
/* Return non-zero iff the first `len` bytes of `p` are octal digits. */
static int
is_octal(const char *p, size_t len)
{
	while (len-- > 0) {
		if (*p < '0' || *p > '7')
			return (0);
	        ++p;
	}
	return (1);
}

/* Heuristic check that `h` looks like an afio large-ASCII header
 * (fixed marker characters plus hex fields in the right places). */
static int
is_afio_large(const char *h, size_t len)
{
	if (len < afiol_header_size)
		return (0);
	if (h[afiol_ino_m_offset] != 'm'
	    || h[afiol_mtime_n_offset] != 'n'
	    || h[afiol_xsize_s_offset] != 's'
	    || h[afiol_filesize_c_offset] != ':')
		return (0);
	if (!is_hex(h + afiol_dev_offset,
	    afiol_ino_m_offset - afiol_dev_offset))
		return (0);
	if (!is_hex(h + afiol_mode_offset,
	    afiol_mtime_n_offset - afiol_mode_offset))
		return (0);
	if (!is_hex(h + afiol_namesize_offset,
	    afiol_xsize_s_offset - afiol_namesize_offset))
		return (0);
	if (!is_hex(h + afiol_filesize_offset, afiol_filesize_size))
		return (0);
	return (1);
}

/* Scan forward for the next plausible odc (or afio large) header;
 * warns if garbage bytes had to be skipped. */
static int
find_odc_header(struct archive_read *a)
{
	const void *h;
	const char *p, *q;
	size_t skip, skipped = 0;
	ssize_t bytes;

	for (;;) {
		h = __archive_read_ahead(a, odc_header_size, &bytes);
		if (h == NULL)
			return (ARCHIVE_FATAL);
		p = h;
		q = p + bytes;

		/* Try the typical case first, then go into the slow search.*/
		if (memcmp("070707", p, 6) == 0 && is_octal(p, odc_header_size))
			return (ARCHIVE_OK);
		if (memcmp("070727", p, 6) == 0 && is_afio_large(p, bytes)) {
			a->archive.archive_format = ARCHIVE_FORMAT_CPIO_AFIO_LARGE;
			return (ARCHIVE_OK);
		}

		/*
		 * Scan ahead until we find something that looks
		 * like an odc header.
		 */
		while (p + odc_header_size <= q) {
			switch (p[5]) {
			case '7':
				if ((memcmp("070707", p, 6) == 0
					&& is_octal(p, odc_header_size))
					|| (memcmp("070727", p, 6) == 0
					    && is_afio_large(p, q - p))) {
					skip = p - (const char *)h;
					__archive_read_consume(a, skip);
					skipped += skip;
					if (p[4] == '2')
						a->archive.archive_format =
						    ARCHIVE_FORMAT_CPIO_AFIO_LARGE;
					if (skipped > 0) {
						archive_set_error(&a->archive,
						    0,
						    "Skipped %d bytes before "
						    "finding valid header",
						    (int)skipped);
						return (ARCHIVE_WARN);
					}
					return (ARCHIVE_OK);
				}
				p += 2;
				break;
			case '0':
				p++;
				break;
			default:
				p += 6;
				break;
			}
		}
		skip = p - (const char *)h;
		__archive_read_consume(a, skip);
		skipped += skip;
	}
}

/* Parse an odc (POSIX.1 octal) fixed header; dispatches to
 * header_afiol() if the scan identified an afio large archive. */
static int
header_odc(struct archive_read *a, struct cpio *cpio,
    struct archive_entry *entry, size_t *namelength, size_t *name_pad)
{
	const void *h;
	int r;
	const char *header;

	a->archive.archive_format = ARCHIVE_FORMAT_CPIO_POSIX;
	a->archive.archive_format_name = "POSIX octet-oriented cpio";

	/* Find the start of the next header. */
	r = find_odc_header(a);
	if (r < ARCHIVE_WARN)
		return (r);

	if (a->archive.archive_format == ARCHIVE_FORMAT_CPIO_AFIO_LARGE) {
		int r2 = (header_afiol(a, cpio, entry, namelength, name_pad));
		if (r2 == ARCHIVE_OK)
			return (r);
		else
			return (r2);
	}

	/* Read fixed-size portion of header. */
	h = __archive_read_ahead(a, odc_header_size, NULL);
	if (h == NULL)
	    return (ARCHIVE_FATAL);

	/* Parse out octal fields. */
	header = (const char *)h;

	archive_entry_set_dev(entry,
		(dev_t)atol8(header + odc_dev_offset, odc_dev_size));
	archive_entry_set_ino(entry, atol8(header + odc_ino_offset, odc_ino_size));
	archive_entry_set_mode(entry,
		(mode_t)atol8(header + odc_mode_offset, odc_mode_size));
	archive_entry_set_uid(entry, atol8(header + odc_uid_offset, odc_uid_size));
	archive_entry_set_gid(entry, atol8(header + odc_gid_offset, odc_gid_size));
	archive_entry_set_nlink(entry,
		(unsigned int)atol8(header + odc_nlink_offset, odc_nlink_size));
	archive_entry_set_rdev(entry,
		(dev_t)atol8(header + odc_rdev_offset, odc_rdev_size));
	archive_entry_set_mtime(entry, atol8(header + odc_mtime_offset, odc_mtime_size), 0);
	*namelength = (size_t)atol8(header + odc_namesize_offset, odc_namesize_size);
	*name_pad = 0; /* No padding of filename. */

	/*
	 * Note: entry_bytes_remaining is at least 64 bits and
	 * therefore guaranteed to be big enough for a 33-bit file
	 * size.
	 */
	cpio->entry_bytes_remaining =
	    atol8(header + odc_filesize_offset, odc_filesize_size);
	archive_entry_set_size(entry, cpio->entry_bytes_remaining);
	cpio->entry_padding = 0;
	__archive_read_consume(a, odc_header_size);
	return (r);
}

/*
 * NOTE: if a filename suffix is ".z", it is the file gziped by afio.
 * it would be nice that we can show uncompressed file size and we can
 * uncompressed file contents automatically, unfortunately we have nothing
 * to get a uncompressed file size while reading each header. it means
 * we also cannot uncompressed file contens under the our framework.
 */
/* Parse an afio "large ASCII" fixed header (mixed hex/octal fields). */
static int
header_afiol(struct archive_read *a, struct cpio *cpio,
    struct archive_entry *entry, size_t *namelength, size_t *name_pad)
{
	const void *h;
	const char *header;

	a->archive.archive_format = ARCHIVE_FORMAT_CPIO_AFIO_LARGE;
	a->archive.archive_format_name = "afio large ASCII";

	/* Read fixed-size portion of header. */
	h = __archive_read_ahead(a, afiol_header_size, NULL);
	if (h == NULL)
	    return (ARCHIVE_FATAL);

	/* Parse out octal fields. */
	header = (const char *)h;

	archive_entry_set_dev(entry,
		(dev_t)atol16(header + afiol_dev_offset, afiol_dev_size));
	archive_entry_set_ino(entry, atol16(header + afiol_ino_offset, afiol_ino_size));
	archive_entry_set_mode(entry,
		(mode_t)atol8(header + afiol_mode_offset, afiol_mode_size));
	archive_entry_set_uid(entry, atol16(header + afiol_uid_offset, afiol_uid_size));
	archive_entry_set_gid(entry, atol16(header + afiol_gid_offset, afiol_gid_size));
	archive_entry_set_nlink(entry,
		(unsigned int)atol16(header + afiol_nlink_offset, afiol_nlink_size));
	archive_entry_set_rdev(entry,
		(dev_t)atol16(header + afiol_rdev_offset, afiol_rdev_size));
	archive_entry_set_mtime(entry, atol16(header + afiol_mtime_offset, afiol_mtime_size), 0);
	*namelength = (size_t)atol16(header + afiol_namesize_offset, afiol_namesize_size);
	*name_pad = 0; /* No padding of filename. */

	cpio->entry_bytes_remaining =
	    atol16(header + afiol_filesize_offset, afiol_filesize_size);
	archive_entry_set_size(entry, cpio->entry_bytes_remaining);
	cpio->entry_padding = 0;
	__archive_read_consume(a, afiol_header_size);
	return (ARCHIVE_OK);
}

/* Parse an old binary header in little-endian byte order. */
static int
header_bin_le(struct archive_read *a, struct cpio *cpio,
    struct archive_entry *entry, size_t *namelength, size_t *name_pad)
{
	const void *h;
	const unsigned char *header;

	a->archive.archive_format = ARCHIVE_FORMAT_CPIO_BIN_LE;
	a->archive.archive_format_name = "cpio (little-endian binary)";

	/* Read fixed-size portion of header. */
	h = __archive_read_ahead(a, bin_header_size, NULL);
	if (h == NULL) {
	    archive_set_error(&a->archive, 0,
		"End of file trying to read next cpio header");
	    return (ARCHIVE_FATAL);
	}

	/* Parse out binary fields. */
	header = (const unsigned char *)h;

	archive_entry_set_dev(entry, header[bin_dev_offset] + header[bin_dev_offset + 1] * 256);
	archive_entry_set_ino(entry, header[bin_ino_offset] + header[bin_ino_offset + 1] * 256);
	archive_entry_set_mode(entry, header[bin_mode_offset] + header[bin_mode_offset + 1] * 256);
	archive_entry_set_uid(entry, header[bin_uid_offset] + header[bin_uid_offset + 1] * 256);
	archive_entry_set_gid(entry, header[bin_gid_offset] + header[bin_gid_offset + 1] * 256);
	archive_entry_set_nlink(entry, header[bin_nlink_offset] + header[bin_nlink_offset + 1] * 256);
	archive_entry_set_rdev(entry, header[bin_rdev_offset] + header[bin_rdev_offset + 1] * 256);
	archive_entry_set_mtime(entry, le4(header + bin_mtime_offset), 0);
	*namelength = header[bin_namesize_offset] + header[bin_namesize_offset + 1] * 256;
	*name_pad = *namelength & 1; /* Pad to even. */

	cpio->entry_bytes_remaining = le4(header + bin_filesize_offset);
	archive_entry_set_size(entry, cpio->entry_bytes_remaining);
	cpio->entry_padding = cpio->entry_bytes_remaining & 1; /* Pad to even. */
	__archive_read_consume(a, bin_header_size);
	return (ARCHIVE_OK);
}

/* Parse an old binary header in big-endian byte order. */
static int
header_bin_be(struct archive_read *a, struct cpio *cpio,
    struct archive_entry *entry, size_t *namelength, size_t *name_pad)
{
	const void *h;
	const unsigned char *header;

	a->archive.archive_format = ARCHIVE_FORMAT_CPIO_BIN_BE;
	a->archive.archive_format_name = "cpio (big-endian binary)";

	/* Read fixed-size portion of header. */
	h = __archive_read_ahead(a, bin_header_size, NULL);
	if (h == NULL) {
	    archive_set_error(&a->archive, 0,
		"End of file trying to read next cpio header");
	    return (ARCHIVE_FATAL);
	}

	/* Parse out binary fields.
*/ header = (const unsigned char *)h; archive_entry_set_dev(entry, header[bin_dev_offset] * 256 + header[bin_dev_offset + 1]); archive_entry_set_ino(entry, header[bin_ino_offset] * 256 + header[bin_ino_offset + 1]); archive_entry_set_mode(entry, header[bin_mode_offset] * 256 + header[bin_mode_offset + 1]); archive_entry_set_uid(entry, header[bin_uid_offset] * 256 + header[bin_uid_offset + 1]); archive_entry_set_gid(entry, header[bin_gid_offset] * 256 + header[bin_gid_offset + 1]); archive_entry_set_nlink(entry, header[bin_nlink_offset] * 256 + header[bin_nlink_offset + 1]); archive_entry_set_rdev(entry, header[bin_rdev_offset] * 256 + header[bin_rdev_offset + 1]); archive_entry_set_mtime(entry, be4(header + bin_mtime_offset), 0); *namelength = header[bin_namesize_offset] * 256 + header[bin_namesize_offset + 1]; *name_pad = *namelength & 1; /* Pad to even. */ cpio->entry_bytes_remaining = be4(header + bin_filesize_offset); archive_entry_set_size(entry, cpio->entry_bytes_remaining); cpio->entry_padding = cpio->entry_bytes_remaining & 1; /* Pad to even. */ __archive_read_consume(a, bin_header_size); return (ARCHIVE_OK); } static int archive_read_format_cpio_cleanup(struct archive_read *a) { struct cpio *cpio; cpio = (struct cpio *)(a->format->data); /* Free inode->name map */ while (cpio->links_head != NULL) { struct links_entry *lp = cpio->links_head->next; if (cpio->links_head->name) free(cpio->links_head->name); free(cpio->links_head); cpio->links_head = lp; } free(cpio); (a->format->data) = NULL; return (ARCHIVE_OK); } static int64_t le4(const unsigned char *p) { return ((p[0] << 16) + (((int64_t)p[1]) << 24) + (p[2] << 0) + (p[3] << 8)); } static int64_t be4(const unsigned char *p) { return ((((int64_t)p[0]) << 24) + (p[1] << 16) + (p[2] << 8) + (p[3])); } /* * Note that this implementation does not (and should not!) obey * locale settings; you cannot simply substitute strtol here, since * it does obey locale. 
*/ static int64_t atol8(const char *p, unsigned char_cnt) { int64_t l; int digit; l = 0; while (char_cnt-- > 0) { if (*p >= '0' && *p <= '7') digit = *p - '0'; else return (l); p++; l <<= 3; l |= digit; } return (l); } static int64_t atol16(const char *p, unsigned char_cnt) { int64_t l; int digit; l = 0; while (char_cnt-- > 0) { if (*p >= 'a' && *p <= 'f') digit = *p - 'a' + 10; else if (*p >= 'A' && *p <= 'F') digit = *p - 'A' + 10; else if (*p >= '0' && *p <= '9') digit = *p - '0'; else return (l); p++; l <<= 4; l |= digit; } return (l); } static int record_hardlink(struct archive_read *a, struct cpio *cpio, struct archive_entry *entry) { struct links_entry *le; dev_t dev; int64_t ino; if (archive_entry_nlink(entry) <= 1) return (ARCHIVE_OK); dev = archive_entry_dev(entry); ino = archive_entry_ino64(entry); /* * First look in the list of multiply-linked files. If we've * already dumped it, convert this entry to a hard link entry. */ for (le = cpio->links_head; le; le = le->next) { if (le->dev == dev && le->ino == ino) { archive_entry_copy_hardlink(entry, le->name); if (--le->links <= 0) { if (le->previous != NULL) le->previous->next = le->next; if (le->next != NULL) le->next->previous = le->previous; if (cpio->links_head == le) cpio->links_head = le->next; free(le->name); free(le); } return (ARCHIVE_OK); } } le = (struct links_entry *)malloc(sizeof(struct links_entry)); if (le == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory adding file to list"); return (ARCHIVE_FATAL); } if (cpio->links_head != NULL) cpio->links_head->previous = le; le->next = cpio->links_head; le->previous = NULL; cpio->links_head = le; le->dev = dev; le->ino = ino; le->links = archive_entry_nlink(entry) - 1; le->name = strdup(archive_entry_pathname(entry)); if (le->name == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory adding file to list"); return (ARCHIVE_FATAL); } return (ARCHIVE_OK); }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5066_0
crossvul-cpp_data_bad_5608_0
/*
* ModSecurity for Apache 2.x, http://www.modsecurity.org/
* Copyright (c) 2004-2011 Trustwave Holdings, Inc. (http://www.trustwave.com/)
*
* You may not use this file except in compliance with
* the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* If any of the files related to licensing are missing or if you have any
* other questions related to licensing please contact Trustwave Holdings, Inc.
* directly using the email address security@modsecurity.org.
*/

#include <limits.h>

#include "modsecurity.h"
#include "msc_logging.h"
#include "msc_util.h"

#include "http_log.h"

#include "apr_lib.h"
#include "acmp.h"
#include "msc_crypt.h"

#if defined(WITH_LUA)
#include "msc_lua.h"
#endif

/* -- Directory context creation and initialisation -- */

/**
* Creates a fresh directory configuration.  Every scalar field starts
* as NOT_SET and every pointer field as NOT_SET_P; these sentinels are
* later resolved by merge_directory_configs() (inherit from parent) or
* init_directory_config() (apply hard defaults).
*/
void *create_directory_config(apr_pool_t *mp, char *path)
{
    directory_config *dcfg = (directory_config *)apr_pcalloc(mp, sizeof(directory_config));
    if (dcfg == NULL) return NULL;

    #ifdef DEBUG_CONF
    ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Created directory config %pp path %s", dcfg, path);
    #endif

    dcfg->mp = mp;
    dcfg->is_enabled = NOT_SET;

    /* Request/response body handling. */
    dcfg->reqbody_access = NOT_SET;
    dcfg->reqintercept_oe = NOT_SET;
    dcfg->reqbody_buffering = NOT_SET;
    dcfg->reqbody_inmemory_limit = NOT_SET;
    dcfg->reqbody_limit = NOT_SET;
    dcfg->reqbody_no_files_limit = NOT_SET;
    dcfg->resbody_access = NOT_SET;
    dcfg->debuglog_name = NOT_SET_P;
    dcfg->debuglog_level = NOT_SET;
    dcfg->debuglog_fd = NOT_SET_P;
    dcfg->of_limit = NOT_SET;
    dcfg->if_limit_action = NOT_SET;
    dcfg->of_limit_action = NOT_SET;
    dcfg->of_mime_types = NOT_SET_P;
    dcfg->of_mime_types_cleared = NOT_SET;
    dcfg->cookie_format = NOT_SET;
    dcfg->argument_separator = NOT_SET;
    dcfg->cookiev0_separator = NOT_SET_P;
    dcfg->rule_inheritance = NOT_SET;
    dcfg->rule_exceptions = apr_array_make(mp, 16, sizeof(rule_exception *));
    dcfg->hash_method = apr_array_make(mp, 16, sizeof(hash_method *));

    /* audit log variables */
    dcfg->auditlog_flag = NOT_SET;
    dcfg->auditlog_type = NOT_SET;
    dcfg->max_rule_time = NOT_SET;
    dcfg->auditlog_dirperms = NOT_SET;
    dcfg->auditlog_fileperms = NOT_SET;
    dcfg->auditlog_name = NOT_SET_P;
    dcfg->auditlog2_name = NOT_SET_P;
    dcfg->auditlog_fd = NOT_SET_P;
    dcfg->auditlog2_fd = NOT_SET_P;
    dcfg->auditlog_storage_dir = NOT_SET_P;
    dcfg->auditlog_parts = NOT_SET_P;
    dcfg->auditlog_relevant_regex = NOT_SET_P;

    dcfg->ruleset = NULL;

    /* Upload */
    dcfg->tmp_dir = NOT_SET_P;
    dcfg->upload_dir = NOT_SET_P;
    dcfg->upload_keep_files = NOT_SET;
    dcfg->upload_validates_files = NOT_SET;
    dcfg->upload_filemode = NOT_SET;
    dcfg->upload_file_limit = NOT_SET;

    /* These are only used during the configuration process. */
    dcfg->tmp_chain_starter = NULL;
    dcfg->tmp_default_actionset = NULL;
    dcfg->tmp_rule_placeholders = NULL;

    /* Misc */
    dcfg->data_dir = NOT_SET_P;
    dcfg->webappid = NOT_SET_P;
    dcfg->sensor_id = NOT_SET_P;
    dcfg->httpBlkey = NOT_SET_P;

    /* Content injection. */
    dcfg->content_injection_enabled = NOT_SET;

    /* Stream inspection */
    dcfg->stream_inbody_inspection = NOT_SET;
    dcfg->stream_outbody_inspection = NOT_SET;

    /* Geo Lookups */
    dcfg->geo = NOT_SET_P;

    /* Gsb Lookups */
    dcfg->gsb = NOT_SET_P;

    /* Unicode Map */
    dcfg->u_map = NOT_SET_P;

    /* Cache */
    dcfg->cache_trans = NOT_SET;
    dcfg->cache_trans_incremental = NOT_SET;
    dcfg->cache_trans_min = NOT_SET;
    dcfg->cache_trans_max = NOT_SET;
    dcfg->cache_trans_maxitems = NOT_SET;

    /* Rule ids — used by add_rule() to reject duplicate rule ids. */
    dcfg->rule_id_htab = apr_hash_make(mp);

    dcfg->component_signatures = apr_array_make(mp, 16, sizeof(char *));

    dcfg->request_encoding = NOT_SET_P;
    dcfg->disable_backend_compression = NOT_SET;

    /* Collection timeout */
    dcfg->col_timeout = NOT_SET;

    /* Hash engine settings. */
    dcfg->crypto_key = NOT_SET_P;
    dcfg->crypto_key_len = NOT_SET;
    dcfg->crypto_key_add = NOT_SET;
    dcfg->crypto_param_name = NOT_SET_P;
    dcfg->hash_is_enabled = NOT_SET;
    dcfg->hash_enforcement = NOT_SET;
    dcfg->crypto_hash_href_rx = NOT_SET;
    dcfg->crypto_hash_faction_rx = NOT_SET;
dcfg->crypto_hash_location_rx = NOT_SET;
    dcfg->crypto_hash_iframesrc_rx = NOT_SET;
    dcfg->crypto_hash_framesrc_rx = NOT_SET;
    dcfg->crypto_hash_href_pm = NOT_SET;
    dcfg->crypto_hash_faction_pm = NOT_SET;
    dcfg->crypto_hash_location_pm = NOT_SET;
    dcfg->crypto_hash_iframesrc_pm = NOT_SET;
    dcfg->crypto_hash_framesrc_pm = NOT_SET;

    return dcfg;
}

/**
* Copies rules between one phase of two configuration contexts,
* taking exceptions into account.  A rule is skipped when any
* exception (by id range, msg regex, or tag regex) matches its
* chain starter; chained continuation rules follow their starter's
* fate via the small state machine in `mode` (0 = at a chain
* starter, 1 = skipping a chain, 2 = copying a chain).
*/
static void copy_rules_phase(apr_pool_t *mp,
    apr_array_header_t *parent_phase_arr,
    apr_array_header_t *child_phase_arr,
    apr_array_header_t *exceptions_arr)
{
    rule_exception **exceptions;
    msre_rule **rules;
    int i, j;
    int mode = 0;

    rules = (msre_rule **)parent_phase_arr->elts;
    for(i = 0; i < parent_phase_arr->nelts; i++) {
        msre_rule *rule = (msre_rule *)rules[i];
        /* copy > 0 means "keep this rule"; each matching exception decrements. */
        int copy = 1;

        if (mode == 0) {
            /* First rule in the chain. */
            exceptions = (rule_exception **)exceptions_arr->elts;
            for(j = 0; j < exceptions_arr->nelts; j++) {

                /* Process exceptions. */
                switch(exceptions[j]->type) {
                    case RULE_EXCEPTION_REMOVE_ID :
                        if ((rule->actionset != NULL)&&(rule->actionset->id != NULL)) {
                            int ruleid = atoi(rule->actionset->id);
                            if (rule_id_in_range(ruleid, exceptions[j]->param)) copy--;
                        }
                        break;
                    case RULE_EXCEPTION_REMOVE_MSG :
                        if ((rule->actionset != NULL)&&(rule->actionset->msg != NULL)) {
                            char *my_error_msg = NULL;
                            int rc = msc_regexec(exceptions[j]->param_data,
                                rule->actionset->msg, strlen(rule->actionset->msg),
                                &my_error_msg);
                            if (rc >= 0) copy--;
                        }
                        break;
                    case RULE_EXCEPTION_REMOVE_TAG :
                        if ((rule->actionset != NULL)&&(apr_is_empty_table(rule->actionset->actions) == 0)) {
                            char *my_error_msg = NULL;
                            const apr_array_header_t *tarr = NULL;
                            const apr_table_entry_t *telts = NULL;
                            int c;

                            tarr = apr_table_elts(rule->actionset->actions);
                            telts = (const apr_table_entry_t*)tarr->elts;

                            /* Match the regex against every "tag" action. */
                            for (c = 0; c < tarr->nelts; c++) {
                                msre_action *action = (msre_action *)telts[c].val;
                                if(strcmp("tag", action->metadata->name) == 0) {
                                    int rc = msc_regexec(exceptions[j]->param_data,
                                        action->param, strlen(action->param),
                                        &my_error_msg);
                                    if (rc >= 0) copy--;
                                }
                            }
                        }
                        break;
                }
            }

            if (copy > 0) {
                #ifdef DEBUG_CONF
                ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Copy rule %pp [id \"%s\"]", rule, rule->actionset->id);
                #endif

                /* Copy the rule. */
                *(msre_rule **)apr_array_push(child_phase_arr) = rule;
                if (rule->actionset->is_chained) mode = 2;
            } else {
                if (rule->actionset->is_chained) mode = 1;
            }
        } else {
            if (mode == 2) {
                #ifdef DEBUG_CONF
                ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Copy chain %pp for rule %pp [id \"%s\"]", rule, rule->chain_starter, rule->chain_starter->actionset->id);
                #endif

                /* Copy the rule (it belongs to the chain we want to include. */
                *(msre_rule **)apr_array_push(child_phase_arr) = rule;
            }

            if ((rule->actionset == NULL)||(rule->actionset->is_chained == 0)) mode = 0;
        }
    }
}

/**
* Copies rules between two configuration contexts,
* taking exceptions into account.  Applies copy_rules_phase() to
* each of the five processing phases in turn.
*/
static int copy_rules(apr_pool_t *mp, msre_ruleset *parent_ruleset,
    msre_ruleset *child_ruleset,
    apr_array_header_t *exceptions_arr)
{
    copy_rules_phase(mp, parent_ruleset->phase_request_headers,
        child_ruleset->phase_request_headers, exceptions_arr);
    copy_rules_phase(mp, parent_ruleset->phase_request_body,
        child_ruleset->phase_request_body, exceptions_arr);
    copy_rules_phase(mp, parent_ruleset->phase_response_headers,
        child_ruleset->phase_response_headers, exceptions_arr);
    copy_rules_phase(mp, parent_ruleset->phase_response_body,
        child_ruleset->phase_response_body, exceptions_arr);
    copy_rules_phase(mp, parent_ruleset->phase_logging,
        child_ruleset->phase_logging, exceptions_arr);

    return 1;
}

/**
* Merges two directory configurations.
*/
/* Child (more specific) settings win; NOT_SET / NOT_SET_P fields inherit
 * from the parent.  Rulesets are combined according to rule_inheritance
 * and the child's rule exceptions. */
void *merge_directory_configs(apr_pool_t *mp, void *_parent, void *_child)
{
    directory_config *parent = (directory_config *)_parent;
    directory_config *child = (directory_config *)_child;
    directory_config *merged = create_directory_config(mp, NULL);

    #ifdef DEBUG_CONF
    ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Merge parent %pp child %pp RESULT %pp", _parent, _child, merged);
    #endif

    if (merged == NULL) return NULL;

    /* Use values from the child configuration where possible,
     * otherwise use the parent's.
     */

    merged->is_enabled = (child->is_enabled == NOT_SET
        ? parent->is_enabled : child->is_enabled);

    /* IO parameters */
    merged->reqbody_access = (child->reqbody_access == NOT_SET
        ? parent->reqbody_access : child->reqbody_access);
    merged->reqbody_buffering = (child->reqbody_buffering == NOT_SET
        ? parent->reqbody_buffering : child->reqbody_buffering);
    merged->reqbody_inmemory_limit = (child->reqbody_inmemory_limit == NOT_SET
        ? parent->reqbody_inmemory_limit : child->reqbody_inmemory_limit);
    merged->reqbody_limit = (child->reqbody_limit == NOT_SET
        ? parent->reqbody_limit : child->reqbody_limit);
    merged->reqbody_no_files_limit = (child->reqbody_no_files_limit == NOT_SET
        ? parent->reqbody_no_files_limit : child->reqbody_no_files_limit);
    merged->resbody_access = (child->resbody_access == NOT_SET
        ? parent->resbody_access : child->resbody_access);

    merged->of_limit = (child->of_limit == NOT_SET
        ? parent->of_limit : child->of_limit);
    merged->if_limit_action = (child->if_limit_action == NOT_SET
        ? parent->if_limit_action : child->if_limit_action);
    merged->of_limit_action = (child->of_limit_action == NOT_SET
        ? parent->of_limit_action : child->of_limit_action);
    merged->reqintercept_oe = (child->reqintercept_oe == NOT_SET
        ? parent->reqintercept_oe : child->reqintercept_oe);

    if (child->of_mime_types != NOT_SET_P) {
        /* Child added to the table */

        if (child->of_mime_types_cleared == 1) {
            /* The list of MIME types was cleared in the child,
             * which means the parent's MIME types went away and
             * we should not take them into consideration here.
             */
            merged->of_mime_types = child->of_mime_types;
            merged->of_mime_types_cleared = 1;
        } else {
            /* Add MIME types defined in the child to those
             * defined in the parent context.
             */
            if (parent->of_mime_types == NOT_SET_P) {
                merged->of_mime_types = child->of_mime_types;
                merged->of_mime_types_cleared = NOT_SET;
            } else {
                merged->of_mime_types = apr_table_overlay(mp, parent->of_mime_types,
                    child->of_mime_types);
                if (merged->of_mime_types == NULL) return NULL;
            }
        }
    } else {
        /* Child did not add to the table */

        if (child->of_mime_types_cleared == 1) {
            merged->of_mime_types_cleared = 1;
        } else {
            merged->of_mime_types = parent->of_mime_types;
            merged->of_mime_types_cleared = parent->of_mime_types_cleared;
        }
    }

    /* debug log — name and descriptor travel together */
    if (child->debuglog_fd == NOT_SET_P) {
        merged->debuglog_name = parent->debuglog_name;
        merged->debuglog_fd = parent->debuglog_fd;
    } else {
        merged->debuglog_name = child->debuglog_name;
        merged->debuglog_fd = child->debuglog_fd;
    }

    merged->debuglog_level = (child->debuglog_level == NOT_SET
        ? parent->debuglog_level : child->debuglog_level);

    merged->cookie_format = (child->cookie_format == NOT_SET
        ? parent->cookie_format : child->cookie_format);
    merged->argument_separator = (child->argument_separator == NOT_SET
        ? parent->argument_separator : child->argument_separator);
    merged->cookiev0_separator = (child->cookiev0_separator == NOT_SET_P
        ? parent->cookiev0_separator : child->cookiev0_separator);

    /* rule inheritance */
    if ((child->rule_inheritance == NOT_SET)||(child->rule_inheritance == 1)) {
        merged->rule_inheritance = parent->rule_inheritance;
        if ((child->ruleset == NULL)&&(parent->ruleset == NULL)) {
            #ifdef DEBUG_CONF
            ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "No rules in this context.");
            #endif

            /* Do nothing, there are no rules in either context. */
        } else
        if (child->ruleset == NULL) {
            #ifdef DEBUG_CONF
            ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Using parent rules in this context.");
            #endif

            /* Copy the rules from the parent context. */
            merged->ruleset = msre_ruleset_create(parent->ruleset->engine, mp);
            copy_rules(mp, parent->ruleset, merged->ruleset, child->rule_exceptions);
        } else
        if (parent->ruleset == NULL) {
            #ifdef DEBUG_CONF
            ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Using child rules in this context.");
            #endif

            /* Copy child rules. */
            merged->ruleset = msre_ruleset_create(child->ruleset->engine, mp);
            merged->ruleset->phase_request_headers = apr_array_copy(mp,
                child->ruleset->phase_request_headers);
            merged->ruleset->phase_request_body = apr_array_copy(mp,
                child->ruleset->phase_request_body);
            merged->ruleset->phase_response_headers = apr_array_copy(mp,
                child->ruleset->phase_response_headers);
            merged->ruleset->phase_response_body = apr_array_copy(mp,
                child->ruleset->phase_response_body);
            merged->ruleset->phase_logging = apr_array_copy(mp,
                child->ruleset->phase_logging);
        } else {
            #ifdef DEBUG_CONF
            ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, mp, "Using parent then child rules in this context.");
            #endif

            /* Copy parent rules, then add child rules to it. */
            merged->ruleset = msre_ruleset_create(parent->ruleset->engine, mp);
            copy_rules(mp, parent->ruleset, merged->ruleset, child->rule_exceptions);

            apr_array_cat(merged->ruleset->phase_request_headers,
                child->ruleset->phase_request_headers);
            apr_array_cat(merged->ruleset->phase_request_body,
                child->ruleset->phase_request_body);
            apr_array_cat(merged->ruleset->phase_response_headers,
                child->ruleset->phase_response_headers);
            apr_array_cat(merged->ruleset->phase_response_body,
                child->ruleset->phase_response_body);
            apr_array_cat(merged->ruleset->phase_logging,
                child->ruleset->phase_logging);
        }
    } else {
        /* Inheritance disabled: only the child's rules apply. */
        merged->rule_inheritance = 0;
        if (child->ruleset != NULL) {
            /* Copy child rules. */
            merged->ruleset = msre_ruleset_create(child->ruleset->engine, mp);
            merged->ruleset->phase_request_headers = apr_array_copy(mp,
                child->ruleset->phase_request_headers);
            merged->ruleset->phase_request_body = apr_array_copy(mp,
                child->ruleset->phase_request_body);
            merged->ruleset->phase_response_headers = apr_array_copy(mp,
                child->ruleset->phase_response_headers);
            merged->ruleset->phase_response_body = apr_array_copy(mp,
                child->ruleset->phase_response_body);
            merged->ruleset->phase_logging = apr_array_copy(mp,
                child->ruleset->phase_logging);
        }
    }

    /* Merge rule exceptions. */
    merged->rule_exceptions = apr_array_append(mp, parent->rule_exceptions,
        child->rule_exceptions);

    merged->hash_method = apr_array_append(mp, parent->hash_method,
        child->hash_method);

    /* audit log variables */
    merged->auditlog_flag = (child->auditlog_flag == NOT_SET
        ? parent->auditlog_flag : child->auditlog_flag);
    merged->auditlog_type = (child->auditlog_type == NOT_SET
        ? parent->auditlog_type : child->auditlog_type);
    merged->max_rule_time = (child->max_rule_time == NOT_SET
        ? parent->max_rule_time : child->max_rule_time);
    merged->auditlog_dirperms = (child->auditlog_dirperms == NOT_SET
        ? parent->auditlog_dirperms : child->auditlog_dirperms);
    merged->auditlog_fileperms = (child->auditlog_fileperms == NOT_SET
        ? parent->auditlog_fileperms : child->auditlog_fileperms);
    /* Audit log name and descriptor travel together, like the debug log. */
    if (child->auditlog_fd != NOT_SET_P) {
        merged->auditlog_fd = child->auditlog_fd;
        merged->auditlog_name = child->auditlog_name;
    } else {
        merged->auditlog_fd = parent->auditlog_fd;
        merged->auditlog_name = parent->auditlog_name;
    }
    if (child->auditlog2_fd != NOT_SET_P) {
        merged->auditlog2_fd = child->auditlog2_fd;
        merged->auditlog2_name = child->auditlog2_name;
    } else {
        merged->auditlog2_fd = parent->auditlog2_fd;
        merged->auditlog2_name = parent->auditlog2_name;
    }
    merged->auditlog_storage_dir = (child->auditlog_storage_dir == NOT_SET_P
        ? parent->auditlog_storage_dir : child->auditlog_storage_dir);
    merged->auditlog_parts = (child->auditlog_parts == NOT_SET_P
        ? parent->auditlog_parts : child->auditlog_parts);
    merged->auditlog_relevant_regex = (child->auditlog_relevant_regex == NOT_SET_P
        ? parent->auditlog_relevant_regex : child->auditlog_relevant_regex);

    /* Upload */
    merged->tmp_dir = (child->tmp_dir == NOT_SET_P
        ? parent->tmp_dir : child->tmp_dir);
    merged->upload_dir = (child->upload_dir == NOT_SET_P
        ? parent->upload_dir : child->upload_dir);
    merged->upload_keep_files = (child->upload_keep_files == NOT_SET
        ? parent->upload_keep_files : child->upload_keep_files);
    merged->upload_validates_files = (child->upload_validates_files == NOT_SET
        ? parent->upload_validates_files : child->upload_validates_files);
    merged->upload_filemode = (child->upload_filemode == NOT_SET
        ? parent->upload_filemode : child->upload_filemode);
    merged->upload_file_limit = (child->upload_file_limit == NOT_SET
        ? parent->upload_file_limit : child->upload_file_limit);

    /* Misc */
    merged->data_dir = (child->data_dir == NOT_SET_P
        ? parent->data_dir : child->data_dir);
    merged->webappid = (child->webappid == NOT_SET_P
        ? parent->webappid : child->webappid);
    merged->sensor_id = (child->sensor_id == NOT_SET_P
        ? parent->sensor_id : child->sensor_id);
    merged->httpBlkey = (child->httpBlkey == NOT_SET_P
        ? parent->httpBlkey : child->httpBlkey);

    /* Content injection. */
    merged->content_injection_enabled = (child->content_injection_enabled == NOT_SET
        ? parent->content_injection_enabled : child->content_injection_enabled);

    /* Stream inspection */
    merged->stream_inbody_inspection = (child->stream_inbody_inspection == NOT_SET
        ? parent->stream_inbody_inspection : child->stream_inbody_inspection);

    merged->stream_outbody_inspection = (child->stream_outbody_inspection == NOT_SET
        ? parent->stream_outbody_inspection : child->stream_outbody_inspection);

    /* Geo Lookup */
    merged->geo = (child->geo == NOT_SET_P
        ? parent->geo : child->geo);

    /* Gsb Lookup */
    merged->gsb = (child->gsb == NOT_SET_P
        ? parent->gsb : child->gsb);

    /* Unicode Map */
    merged->u_map = (child->u_map == NOT_SET_P
        ? parent->u_map : child->u_map);

    /* Cache */
    merged->cache_trans = (child->cache_trans == NOT_SET
        ? parent->cache_trans : child->cache_trans);
    merged->cache_trans_incremental = (child->cache_trans_incremental == NOT_SET
        ? parent->cache_trans_incremental : child->cache_trans_incremental);
    merged->cache_trans_min = (child->cache_trans_min == (apr_size_t)NOT_SET
        ? parent->cache_trans_min : child->cache_trans_min);
    merged->cache_trans_max = (child->cache_trans_max == (apr_size_t)NOT_SET
        ? parent->cache_trans_max : child->cache_trans_max);
    merged->cache_trans_maxitems = (child->cache_trans_maxitems == (apr_size_t)NOT_SET
        ? parent->cache_trans_maxitems : child->cache_trans_maxitems);

    /* Merge component signatures. */
    merged->component_signatures = apr_array_append(mp, parent->component_signatures,
        child->component_signatures);

    merged->request_encoding = (child->request_encoding == NOT_SET_P
        ? parent->request_encoding : child->request_encoding);

    merged->disable_backend_compression = (child->disable_backend_compression == NOT_SET
        ? parent->disable_backend_compression : child->disable_backend_compression);

    merged->col_timeout = (child->col_timeout == NOT_SET
        ? parent->col_timeout : child->col_timeout);

    /* Hash */
    merged->crypto_key = (child->crypto_key == NOT_SET_P
        ? parent->crypto_key : child->crypto_key);
    merged->crypto_key_len = (child->crypto_key_len == NOT_SET
        ? parent->crypto_key_len : child->crypto_key_len);
    merged->crypto_key_add = (child->crypto_key_add == NOT_SET
        ? parent->crypto_key_add : child->crypto_key_add);
    merged->crypto_param_name = (child->crypto_param_name == NOT_SET_P
        ? parent->crypto_param_name : child->crypto_param_name);
    merged->hash_is_enabled = (child->hash_is_enabled == NOT_SET
        ? parent->hash_is_enabled : child->hash_is_enabled);
    merged->hash_enforcement = (child->hash_enforcement == NOT_SET
        ? parent->hash_enforcement : child->hash_enforcement);
    merged->crypto_hash_href_rx = (child->crypto_hash_href_rx == NOT_SET
        ? parent->crypto_hash_href_rx : child->crypto_hash_href_rx);
    merged->crypto_hash_faction_rx = (child->crypto_hash_faction_rx == NOT_SET
        ? parent->crypto_hash_faction_rx : child->crypto_hash_faction_rx);
    merged->crypto_hash_location_rx = (child->crypto_hash_location_rx == NOT_SET
        ? parent->crypto_hash_location_rx : child->crypto_hash_location_rx);
    merged->crypto_hash_iframesrc_rx = (child->crypto_hash_iframesrc_rx == NOT_SET
        ? parent->crypto_hash_iframesrc_rx : child->crypto_hash_iframesrc_rx);
    merged->crypto_hash_framesrc_rx = (child->crypto_hash_framesrc_rx == NOT_SET
        ? parent->crypto_hash_framesrc_rx : child->crypto_hash_framesrc_rx);
    merged->crypto_hash_href_pm = (child->crypto_hash_href_pm == NOT_SET
        ? parent->crypto_hash_href_pm : child->crypto_hash_href_pm);
    merged->crypto_hash_faction_pm = (child->crypto_hash_faction_pm == NOT_SET
        ? parent->crypto_hash_faction_pm : child->crypto_hash_faction_pm);
    merged->crypto_hash_location_pm = (child->crypto_hash_location_pm == NOT_SET
        ? parent->crypto_hash_location_pm : child->crypto_hash_location_pm);
    merged->crypto_hash_iframesrc_pm = (child->crypto_hash_iframesrc_pm == NOT_SET
        ? parent->crypto_hash_iframesrc_pm : child->crypto_hash_iframesrc_pm);
    merged->crypto_hash_framesrc_pm = (child->crypto_hash_framesrc_pm == NOT_SET
        ? parent->crypto_hash_framesrc_pm : child->crypto_hash_framesrc_pm);

    return merged;
}

/**
* Initialise directory configuration. This function is *not* meant
* to be called for directory configuration instances created during
* the configuration phase. It can only be called on copies of those
* (created fresh for every transaction).
*/
void init_directory_config(directory_config *dcfg)
{
    if (dcfg == NULL) return;

    /* Replace every remaining NOT_SET / NOT_SET_P sentinel with its
     * hard default. */
    if (dcfg->is_enabled == NOT_SET) dcfg->is_enabled = 0;

    if (dcfg->reqbody_access == NOT_SET) dcfg->reqbody_access = 0;
    if (dcfg->reqintercept_oe == NOT_SET) dcfg->reqintercept_oe = 0;
    if (dcfg->reqbody_buffering == NOT_SET) dcfg->reqbody_buffering = REQUEST_BODY_FORCEBUF_OFF;
    if (dcfg->reqbody_inmemory_limit == NOT_SET)
        dcfg->reqbody_inmemory_limit = REQUEST_BODY_DEFAULT_INMEMORY_LIMIT;
    if (dcfg->reqbody_limit == NOT_SET) dcfg->reqbody_limit = REQUEST_BODY_DEFAULT_LIMIT;
    if (dcfg->reqbody_no_files_limit == NOT_SET) dcfg->reqbody_no_files_limit = REQUEST_BODY_NO_FILES_DEFAULT_LIMIT;
    if (dcfg->resbody_access == NOT_SET) dcfg->resbody_access = 0;

    if (dcfg->of_limit == NOT_SET) dcfg->of_limit = RESPONSE_BODY_DEFAULT_LIMIT;
    if (dcfg->if_limit_action == NOT_SET) dcfg->if_limit_action = REQUEST_BODY_LIMIT_ACTION_REJECT;
    if (dcfg->of_limit_action == NOT_SET) dcfg->of_limit_action = RESPONSE_BODY_LIMIT_ACTION_REJECT;

    if (dcfg->of_mime_types == NOT_SET_P) {
        dcfg->of_mime_types = apr_table_make(dcfg->mp, 3);
        if (dcfg->of_mime_types_cleared != 1) {
            apr_table_setn(dcfg->of_mime_types, "text/plain", "1");
            apr_table_setn(dcfg->of_mime_types, "text/html", "1");
        }
    }

    if (dcfg->debuglog_fd == NOT_SET_P) dcfg->debuglog_fd = NULL;
    if (dcfg->debuglog_name == NOT_SET_P) dcfg->debuglog_name = NULL;
    if (dcfg->debuglog_level == NOT_SET) dcfg->debuglog_level = 0;

    if (dcfg->cookie_format == NOT_SET) dcfg->cookie_format = 0;
    if
(dcfg->argument_separator == NOT_SET) dcfg->argument_separator = '&'; if (dcfg->cookiev0_separator == NOT_SET_P) dcfg->cookiev0_separator = NULL; if (dcfg->rule_inheritance == NOT_SET) dcfg->rule_inheritance = 1; /* audit log variables */ if (dcfg->auditlog_flag == NOT_SET) dcfg->auditlog_flag = 0; if (dcfg->auditlog_type == NOT_SET) dcfg->auditlog_type = AUDITLOG_SERIAL; if (dcfg->max_rule_time == NOT_SET) dcfg->max_rule_time = 0; if (dcfg->auditlog_dirperms == NOT_SET) dcfg->auditlog_dirperms = CREATEMODE_DIR; if (dcfg->auditlog_fileperms == NOT_SET) dcfg->auditlog_fileperms = CREATEMODE; if (dcfg->auditlog_fd == NOT_SET_P) dcfg->auditlog_fd = NULL; if (dcfg->auditlog2_fd == NOT_SET_P) dcfg->auditlog2_fd = NULL; if (dcfg->auditlog_name == NOT_SET_P) dcfg->auditlog_name = NULL; if (dcfg->auditlog2_name == NOT_SET_P) dcfg->auditlog2_name = NULL; if (dcfg->auditlog_storage_dir == NOT_SET_P) dcfg->auditlog_storage_dir = NULL; if (dcfg->auditlog_parts == NOT_SET_P) dcfg->auditlog_parts = "ABCFHZ"; if (dcfg->auditlog_relevant_regex == NOT_SET_P) dcfg->auditlog_relevant_regex = NULL; /* Upload */ if (dcfg->tmp_dir == NOT_SET_P) dcfg->tmp_dir = guess_tmp_dir(dcfg->mp); if (dcfg->upload_dir == NOT_SET_P) dcfg->upload_dir = NULL; if (dcfg->upload_keep_files == NOT_SET) dcfg->upload_keep_files = KEEP_FILES_OFF; if (dcfg->upload_validates_files == NOT_SET) dcfg->upload_validates_files = 0; if (dcfg->upload_filemode == NOT_SET) dcfg->upload_filemode = 0600; if (dcfg->upload_file_limit == NOT_SET) dcfg->upload_file_limit = 100; /* Misc */ if (dcfg->data_dir == NOT_SET_P) dcfg->data_dir = NULL; if (dcfg->webappid == NOT_SET_P) dcfg->webappid = "default"; if (dcfg->sensor_id == NOT_SET_P) dcfg->sensor_id = "default"; if (dcfg->httpBlkey == NOT_SET_P) dcfg->httpBlkey = NULL; /* Content injection. 
*/ if (dcfg->content_injection_enabled == NOT_SET) dcfg->content_injection_enabled = 0; /* Stream inspection */ if (dcfg->stream_inbody_inspection == NOT_SET) dcfg->stream_inbody_inspection = 0; if (dcfg->stream_outbody_inspection == NOT_SET) dcfg->stream_outbody_inspection = 0; /* Geo Lookup */ if (dcfg->geo == NOT_SET_P) dcfg->geo = NULL; /* Gsb Lookup */ if (dcfg->gsb == NOT_SET_P) dcfg->gsb = NULL; /* Unicode Map */ if (dcfg->u_map == NOT_SET_P) dcfg->u_map = NULL; /* Cache */ if (dcfg->cache_trans == NOT_SET) dcfg->cache_trans = MODSEC_CACHE_DISABLED; if (dcfg->cache_trans_incremental == NOT_SET) dcfg->cache_trans_incremental = 0; if (dcfg->cache_trans_min == (apr_size_t)NOT_SET) dcfg->cache_trans_min = 32; if (dcfg->cache_trans_max == (apr_size_t)NOT_SET) dcfg->cache_trans_max = 1024; if (dcfg->cache_trans_maxitems == (apr_size_t)NOT_SET) dcfg->cache_trans_maxitems = 512; if (dcfg->request_encoding == NOT_SET_P) dcfg->request_encoding = NULL; if (dcfg->disable_backend_compression == NOT_SET) dcfg->disable_backend_compression = 0; if (dcfg->col_timeout == NOT_SET) dcfg->col_timeout = 3600; /* Hash */ if (dcfg->crypto_key == NOT_SET_P) dcfg->crypto_key = getkey(dcfg->mp); if (dcfg->crypto_key_len == NOT_SET) dcfg->crypto_key_len = strlen(dcfg->crypto_key); if (dcfg->crypto_key_add == NOT_SET) dcfg->crypto_key_add = HASH_KEYONLY; if (dcfg->crypto_param_name == NOT_SET_P) dcfg->crypto_param_name = "crypt"; if (dcfg->hash_is_enabled == NOT_SET) dcfg->hash_is_enabled = HASH_DISABLED; if (dcfg->hash_enforcement == NOT_SET) dcfg->hash_enforcement = HASH_DISABLED; if (dcfg->crypto_hash_href_rx == NOT_SET) dcfg->crypto_hash_href_rx = 0; if (dcfg->crypto_hash_faction_rx == NOT_SET) dcfg->crypto_hash_faction_rx = 0; if (dcfg->crypto_hash_location_rx == NOT_SET) dcfg->crypto_hash_location_rx = 0; if (dcfg->crypto_hash_iframesrc_rx == NOT_SET) dcfg->crypto_hash_iframesrc_rx = 0; if (dcfg->crypto_hash_framesrc_rx == NOT_SET) dcfg->crypto_hash_framesrc_rx = 0; if 
(dcfg->crypto_hash_href_pm == NOT_SET) dcfg->crypto_hash_href_pm = 0;
    if (dcfg->crypto_hash_faction_pm == NOT_SET) dcfg->crypto_hash_faction_pm = 0;
    if (dcfg->crypto_hash_location_pm == NOT_SET) dcfg->crypto_hash_location_pm = 0;
    if (dcfg->crypto_hash_iframesrc_pm == NOT_SET) dcfg->crypto_hash_iframesrc_pm = 0;
    if (dcfg->crypto_hash_framesrc_pm == NOT_SET) dcfg->crypto_hash_framesrc_pm = 0;
}

/**
 * Creates a rule of the given type from the textual parts p1..p3 and adds
 * it to the directory configuration's ruleset (creating the ruleset and the
 * default actionset on first use).
 *
 * Enforces unique rule IDs (via dcfg->rule_id_htab), validates that chained
 * (non-starter) rules carry no disruptive/metadata/phase/skip actions, merges
 * the rule's actionset with the SecDefaultAction actionset, and maintains the
 * chain-starter and skipAfter-placeholder bookkeeping.
 *
 * Returns NULL on success, or an error message string for the caller
 * (Apache's config machinery) on failure.
 */
static const char *add_rule(cmd_parms *cmd, directory_config *dcfg, int type,
    const char *p1, const char *p2, const char *p3)
{
    char *my_error_msg = NULL;
    //msre_rule *rule = NULL, *tmp_rule = NULL;
    char *rid = NULL;
    msre_rule *rule = NULL;
    extern msc_engine *modsecurity;
    int offset = 0;

#ifdef DEBUG_CONF
    ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
        "Rule: type=%d p1='%s' p2='%s' p3='%s'", type, p1, p2, p3);
#endif

    /* Create a ruleset if one does not exist. */
    if ((dcfg->ruleset == NULL)||(dcfg->ruleset == NOT_SET_P)) {
        dcfg->ruleset = msre_ruleset_create(modsecurity->msre, cmd->pool);
        if (dcfg->ruleset == NULL) return FATAL_ERROR;
    }

    /* Create the rule now: Lua rules have a dedicated constructor, everything
     * else goes through the generic one. */
    switch(type) {
#if defined(WITH_LUA)
        case RULE_TYPE_LUA :
            rule = msre_rule_lua_create(dcfg->ruleset, cmd->directive->filename,
                cmd->directive->line_num, p1, p2, &my_error_msg);
            break;
#endif
        default :
            rule = msre_rule_create(dcfg->ruleset, type, cmd->directive->filename,
                cmd->directive->line_num, p1, p2, p3, &my_error_msg);
            break;
    }

    if (rule == NULL) {
        return my_error_msg;
    }

    /* Rules must have uniq ID. The checks below only apply to chain-starter
     * (or unchained) rules; chained continuations inherit from the starter. */
    if (
#if defined(WITH_LUA)
        type != RULE_TYPE_LUA &&
#endif
        (dcfg->tmp_chain_starter == NULL))
        if(rule->actionset == NULL)
            return "ModSecurity: Rules must have at least id action";

    if(rule->actionset != NULL && (dcfg->tmp_chain_starter == NULL)) {
        if(rule->actionset->id == NOT_SET_P
#if defined(WITH_LUA)
            && (type != RULE_TYPE_LUA)
#endif
        )
            return "ModSecurity: No action id present within the rule";
#if defined(WITH_LUA)
        if(type != RULE_TYPE_LUA)
#endif
        {
            /* Reject duplicate IDs; remember new IDs in the per-config hash.
             * Keys/values are duplicated into dcfg->mp so they outlive cmd->pool. */
            rid = apr_hash_get(dcfg->rule_id_htab, rule->actionset->id, APR_HASH_KEY_STRING);
            if(rid != NULL) {
                return "ModSecurity: Found another rule with the same id";
            } else {
                apr_hash_set(dcfg->rule_id_htab, apr_pstrdup(dcfg->mp, rule->actionset->id), APR_HASH_KEY_STRING, apr_pstrdup(dcfg->mp, "1"));
            }
            //tmp_rule = msre_ruleset_fetch_rule(dcfg->ruleset, rule->actionset->id, offset);
            //if(tmp_rule != NULL)
            //    return "ModSecurity: Found another rule with the same id";
        }
    }

    /* Create default actionset if one does not already exist. */
    if (dcfg->tmp_default_actionset == NULL) {
        dcfg->tmp_default_actionset = msre_actionset_create_default(modsecurity->msre);
        if (dcfg->tmp_default_actionset == NULL) return FATAL_ERROR;
    }

    /* Check some cases prior to merging so we know where it came from */
    /* Check syntax for chained rules */
    if ((rule->actionset != NULL) && (dcfg->tmp_chain_starter != NULL)) {
        /* Must NOT specify a disruptive action. */
        if (rule->actionset->intercept_action != NOT_SET) {
            return apr_psprintf(cmd->pool, "ModSecurity: Disruptive actions can only "
                "be specified by chain starter rules.");
        }

        /* Must NOT specify a skipafter action. */
        if (rule->actionset->skip_after != NOT_SET_P) {
            return apr_psprintf(cmd->pool, "ModSecurity: SkipAfter actions can only "
                "be specified by chain starter rules.");
        }

        /* Must NOT specify a phase. */
        if (rule->actionset->phase != NOT_SET) {
            return apr_psprintf(cmd->pool, "ModSecurity: Execution phases can only be "
                "specified by chain starter rules.");
        }

        /* Must NOT use metadata actions. */
        /* ENH: loop through to check for tags */
        if ((rule->actionset->id != NOT_SET_P)
            ||(rule->actionset->rev != NOT_SET_P)
            ||(rule->actionset->msg != NOT_SET_P)
            ||(rule->actionset->severity != NOT_SET)
            ||(rule->actionset->version != NOT_SET_P)
            ||(rule->actionset->accuracy != NOT_SET)
            ||(rule->actionset->maturity != NOT_SET)
            ||(rule->actionset->logdata != NOT_SET_P))
        {
            return apr_psprintf(cmd->pool, "ModSecurity: Metadata actions (id, rev, msg, tag, severity, ver, accuracy, maturity, logdata) "
                " can only be specified by chain starter rules.");
        }

        /* Must NOT use skip. */
        if (rule->actionset->skip_count != NOT_SET) {
            return apr_psprintf(cmd->pool, "ModSecurity: The skip action can only be used "
                " by chain starter rules. ");
        }
    }

    /* Merge actions with the parent.
     *
     * ENH Probably do not want this done fully for chained rules.
     */
    rule->actionset = msre_actionset_merge(modsecurity->msre, dcfg->tmp_default_actionset,
        rule->actionset, 1);

    /* Keep track of the parent action for "block" */
    rule->actionset->parent_intercept_action_rec = dcfg->tmp_default_actionset->intercept_action_rec;
    rule->actionset->parent_intercept_action = dcfg->tmp_default_actionset->intercept_action;

    /* Must NOT specify a disruptive action in logging phase (allow/none excepted). */
    if ((rule->actionset != NULL)
        && (rule->actionset->phase == PHASE_LOGGING)
        && (rule->actionset->intercept_action != ACTION_ALLOW)
        && (rule->actionset->intercept_action != ACTION_ALLOW_REQUEST)
        && (rule->actionset->intercept_action != ACTION_NONE)
    ) {
        return apr_psprintf(cmd->pool, "ModSecurity: Disruptive actions "
            "cannot be specified in the logging phase.");
    }

    /* Chained continuations run in the starter's phase. */
    if (dcfg->tmp_chain_starter != NULL) {
        rule->chain_starter = dcfg->tmp_chain_starter;
        rule->actionset->phase = rule->chain_starter->actionset->phase;
    }

    if (rule->actionset->is_chained != 1) {
        /* If this rule is part of the chain but does
         * not want more rules to follow in the chain
         * then cut it (the chain).
         */
        dcfg->tmp_chain_starter = NULL;
    } else {
        /* On the other hand, if this rule wants other
         * rules to follow it, then start a new chain
         * if there isn't one already.
         */
        if (dcfg->tmp_chain_starter == NULL) {
            dcfg->tmp_chain_starter = rule;
        }
    }

    /* Optimisation: only enable upload validation when an inspectFile rule exists. */
    if ((rule->op_name != NULL)&&(strcasecmp(rule->op_name, "inspectFile") == 0)) {
        dcfg->upload_validates_files = 1;
    }

    /* Create skip table if one does not already exist. */
    if (dcfg->tmp_rule_placeholders == NULL) {
        dcfg->tmp_rule_placeholders = apr_table_make(cmd->pool, 10);
        if (dcfg->tmp_rule_placeholders == NULL) return FATAL_ERROR;
    }

    /* Keep track of any rule IDs we need to skip after */
    if (rule->actionset->skip_after != NOT_SET_P) {
        char *tmp_id = apr_pstrdup(cmd->pool, rule->actionset->skip_after);
        apr_table_setn(dcfg->tmp_rule_placeholders, tmp_id, tmp_id);

#ifdef DEBUG_CONF
        ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
            "Watching for skipafter target rule id=\"%s\".", tmp_id);
#endif
    }

#ifdef DEBUG_CONF
    ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
        "Adding rule %pp phase=%d id=\"%s\".", rule, rule->actionset->phase,
        (rule->actionset->id == NOT_SET_P ? "(none)" : rule->actionset->id));
#endif

    /* Add rule to the recipe. */
    if (msre_ruleset_rule_add(dcfg->ruleset, rule, rule->actionset->phase) < 0) {
        return "Internal Error: Failed to add rule to the ruleset.";
    }

    /* Add an additional placeholder if this rule ID is on the list */
    if ((rule->actionset->id != NULL) && apr_table_get(dcfg->tmp_rule_placeholders, rule->actionset->id)) {
        msre_rule *phrule = apr_palloc(rule->ruleset->mp, sizeof(msre_rule));
        if (phrule == NULL) {
            return FATAL_ERROR;
        }

#ifdef DEBUG_CONF
        ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
            "Adding placeholder %pp for rule %pp id=\"%s\".", phrule, rule, rule->actionset->id);
#endif

        /* shallow copy of original rule with placeholder marked as target */
        memcpy(phrule, rule, sizeof(msre_rule));
        phrule->placeholder = RULE_PH_SKIPAFTER;

        /* Add placeholder. */
        if (msre_ruleset_rule_add(dcfg->ruleset, phrule, phrule->actionset->phase) < 0) {
            return "Internal Error: Failed to add placeholder to the ruleset.";
        }

        /* No longer need to search for the ID */
        apr_table_unset(dcfg->tmp_rule_placeholders, rule->actionset->id);
    }

    /* Update the unparsed rule */
    rule->unparsed = msre_rule_generate_unparsed(dcfg->ruleset->mp, rule, NULL, NULL, NULL);

    return NULL;
}

/**
 * Creates a SecMarker rule (a no-op placeholder used as a skipAfter target)
 * and registers it in every processing phase of the ruleset.
 *
 * Returns NULL on success or an error message string on failure.
 */
static const char *add_marker(cmd_parms *cmd, directory_config *dcfg,
    const char *p1, const char *p2, const char *p3)
{
    char *my_error_msg = NULL;
    msre_rule *rule = NULL;
    extern msc_engine *modsecurity;
    int p;

#ifdef DEBUG_CONF
    ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
        "Rule: type=%d p1='%s' p2='%s' p3='%s'", RULE_TYPE_MARKER, p1, p2, p3);
#endif

    /* Create a ruleset if one does not exist. */
    if ((dcfg->ruleset == NULL)||(dcfg->ruleset == NOT_SET_P)) {
        dcfg->ruleset = msre_ruleset_create(modsecurity->msre, cmd->pool);
        if (dcfg->ruleset == NULL) return FATAL_ERROR;
    }

    /* Create the rule now.
*/
    rule = msre_rule_create(dcfg->ruleset, RULE_TYPE_MARKER, cmd->directive->filename,
        cmd->directive->line_num, p1, p2, p3, &my_error_msg);
    if (rule == NULL) {
        return my_error_msg;
    }

    /* This is a marker */
    rule->placeholder = RULE_PH_MARKER;

    /* Add placeholder to each phase so skipAfter can target it from anywhere. */
    for (p = PHASE_FIRST; p <= PHASE_LAST; p++) {
#ifdef DEBUG_CONF
        ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
            "Adding marker %pp phase=%d id=\"%s\".", rule, p,
            (rule->actionset->id == NOT_SET_P ? "(none)" : rule->actionset->id));
#endif

        if (msre_ruleset_rule_add(dcfg->ruleset, rule, p) < 0) {
            return "Internal Error: Failed to add marker to the ruleset.";
        }
    }

    /* No longer need to search for the ID */
    if (dcfg->tmp_rule_placeholders != NULL) {
        apr_table_unset(dcfg->tmp_rule_placeholders, rule->actionset->id);
    }

    return NULL;
}

/**
 * Implements SecRuleUpdateActionById: fetches the rule with ID p1 (at the
 * given chain offset) from the current ruleset and merges the actions in p2
 * into its actionset. The rule's ID and phase may not be changed this way.
 *
 * Returns NULL on success (including when the rule is not found, which is
 * silently ignored) or an error message string on failure.
 */
static const char *update_rule_action(cmd_parms *cmd, directory_config *dcfg,
    const char *p1, const char *p2, int offset)
{
    char *my_error_msg = NULL;
    msre_rule *rule = NULL;
    msre_actionset *new_actionset = NULL;
    msre_ruleset *ruleset = dcfg->ruleset;
    extern msc_engine *modsecurity;

    /* Get the ruleset if one exists */
    if ((ruleset == NULL)||(ruleset == NOT_SET_P)) {
        return NULL;
    }

#ifdef DEBUG_CONF
    ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
        "Update rule id=\"%s\" with action \"%s\".", p1, p2);
#endif

    /* Fetch the rule */
    rule = msre_ruleset_fetch_rule(ruleset, p1, offset);
    if (rule == NULL) {
#ifdef DEBUG_CONF
        ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
            "Update rule id=\"%s\" with action \"%s\" failed: Rule not found.", p1, p2);
#endif
        return NULL;
    }

    /* Check the rule actionset */
    /* ENH: Can this happen?
return apr_psprintf(cmd->pool,
                "ModSecurity: Failed to open the audit log pipe: %s", pipe_name);
        }
        dcfg->auditlog_fd = ap_piped_log_write_fd(pipe_log);
    }
    else {
        /* Plain file path; resolved relative to ServerRoot. */
        const char *file_name = ap_server_root_relative(cmd->pool, dcfg->auditlog_name);
        apr_status_t rc;

        rc = apr_file_open(&dcfg->auditlog_fd, file_name,
            APR_WRITE | APR_APPEND | APR_CREATE | APR_BINARY, CREATEMODE, cmd->pool);
        if (rc != APR_SUCCESS) {
            return apr_psprintf(cmd->pool, "ModSecurity: Failed to open the audit log file: %s", file_name);
        }
    }

    return NULL;
}

/* Handles SecAuditLog2: opens a secondary audit log (pipe if the name begins
 * with '|', file otherwise). Requires a primary SecAuditLog to be set first. */
static const char *cmd_audit_log2(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = _dcfg;

    if (dcfg->auditlog_name == NOT_SET_P) {
        return apr_psprintf(cmd->pool, "ModSecurity: Cannot configure a secondary audit log without a primary defined: %s", p1);
    }

    dcfg->auditlog2_name = (char *)p1;

    if (dcfg->auditlog2_name[0] == '|') {
        const char *pipe_name = ap_server_root_relative(cmd->pool, dcfg->auditlog2_name + 1);
        piped_log *pipe_log;

        pipe_log = ap_open_piped_log(cmd->pool, pipe_name);
        if (pipe_log == NULL) {
            return apr_psprintf(cmd->pool, "ModSecurity: Failed to open the secondary audit log pipe: %s", pipe_name);
        }
        dcfg->auditlog2_fd = ap_piped_log_write_fd(pipe_log);
    }
    else {
        const char *file_name = ap_server_root_relative(cmd->pool, dcfg->auditlog2_name);
        apr_status_t rc;

        rc = apr_file_open(&dcfg->auditlog2_fd, file_name,
            APR_WRITE | APR_APPEND | APR_CREATE | APR_BINARY, CREATEMODE, cmd->pool);
        if (rc != APR_SUCCESS) {
            return apr_psprintf(cmd->pool, "ModSecurity: Failed to open the secondary audit log file: %s", file_name);
        }
    }

    return NULL;
}

/* Handles SecAuditLogParts: validates and stores the audit log parts string. */
static const char *cmd_audit_log_parts(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = _dcfg;

    if (is_valid_parts_specification((char *)p1) != 1) {
        return apr_psprintf(cmd->pool, "Invalid parts specification for SecAuditLogParts: %s", p1);
    }

    dcfg->auditlog_parts = (char *)p1;

    return NULL;
}

/* Handles SecAuditLogRelevantStatus: compiles the status-code regex used to
 * decide relevance when SecAuditEngine is RelevantOnly. */
static const char *cmd_audit_log_relevant_status(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = _dcfg;

    dcfg->auditlog_relevant_regex = msc_pregcomp(cmd->pool, p1, PCRE_DOTALL, NULL, NULL);
    if (dcfg->auditlog_relevant_regex == NULL) {
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p1);
    }

    return NULL;
}

/* Handles SecAuditLogType: Serial (one file) or Concurrent (one file per tx). */
static const char *cmd_audit_log_type(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = _dcfg;

    if (strcasecmp(p1, "Serial") == 0) dcfg->auditlog_type = AUDITLOG_SERIAL;
    else
    if (strcasecmp(p1, "Concurrent") == 0) dcfg->auditlog_type = AUDITLOG_CONCURRENT;
    else
    return (const char *)apr_psprintf(cmd->pool,
        "ModSecurity: Unrecognised parameter value for SecAuditLogType: %s", p1);

    return NULL;
}

/* Handles SecAuditLogDirMode: octal directory permissions for concurrent
 * audit logging, or "default" to reset to NOT_SET. */
static const char *cmd_audit_log_dirmode(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;

    if (dcfg == NULL) return NULL;

    if (strcasecmp(p1, "default") == 0) {
        dcfg->auditlog_dirperms = NOT_SET;
    }
    else {
        long int mode = strtol(p1, NULL, 8); /* expects octal mode */
        if ((mode == LONG_MAX)||(mode == LONG_MIN)||(mode <= 0)||(mode > 07777)) {
            return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecAuditLogDirMode: %s", p1);
        }
        dcfg->auditlog_dirperms = mode2fileperms(mode);
    }

    return NULL;
}

/* Handles SecAuditLogFileMode: octal file permissions for concurrent audit
 * log files, or "default" to reset to NOT_SET. */
static const char *cmd_audit_log_filemode(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;

    if (dcfg == NULL) return NULL;

    if (strcasecmp(p1, "default") == 0) {
        dcfg->auditlog_fileperms = NOT_SET;
    }
    else {
        long int mode = strtol(p1, NULL, 8); /* expects octal mode */
        if ((mode == LONG_MAX)||(mode == LONG_MIN)||(mode <= 0)||(mode > 07777)) {
            return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecAuditLogFileMode: %s", p1);
        }
        dcfg->auditlog_fileperms = mode2fileperms(mode);
    }

    return NULL;
}

/* Handles SecAuditLogStorageDir: storage directory for concurrent audit
 * logging, resolved relative to ServerRoot. */
static const char *cmd_audit_log_storage_dir(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = _dcfg;

    dcfg->auditlog_storage_dir = ap_server_root_relative(cmd->pool, p1);

    return NULL;
}

/* Handles SecCookieFormat: "0" (Netscape) or "1" (RFC 2109-style). */
static const char *cmd_cookie_format(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;

    if (strcmp(p1, "0") == 0) dcfg->cookie_format = COOKIES_V0;
    else
    if (strcmp(p1, "1") == 0) dcfg->cookie_format = COOKIES_V1;
    else {
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid cookie format: %s", p1);
    }

    return NULL;
}

/* Handles SecChrootDir: records the chroot target (global, main server only)
 * and verifies it is reachable by chdir-ing there and back. */
static const char *cmd_chroot_dir(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    char cwd[1025] = "";

    if (cmd->server->is_virtual) {
        return "ModSecurity: SecChrootDir not allowed in VirtualHost";
    }

    chroot_dir = (char *)p1;

    if (getcwd(cwd, 1024) == NULL) {
        return "ModSecurity: Failed to get the current working directory";
    }

    if (chdir(chroot_dir) < 0) {
        return apr_psprintf(cmd->pool, "ModSecurity: Failed to chdir to %s, errno=%d (%s)",
            chroot_dir, errno, strerror(errno));
    }

    if (chdir(cwd) < 0) {
        return apr_psprintf(cmd->pool, "ModSecurity: Failed to chdir to %s, errno=%d (%s)",
            cwd, errno, strerror(errno));
    }

    return NULL;
}

/**
 * Adds component signature to the list of signatures kept in configuration.
 */
static const char *cmd_component_signature(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;

    /* ENH Enforce "Name/VersionX.Y.Z (comment)" format.
*/
    *(char **)apr_array_push(dcfg->component_signatures) = (char *)p1;

    return NULL;
}

/* Handles SecContentInjection (on/off flag). */
static const char *cmd_content_injection(cmd_parms *cmd, void *_dcfg, int flag)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;
    dcfg->content_injection_enabled = flag;
    return NULL;
}

/* Handles SecDataDir: persistent collection storage dir (main server only). */
static const char *cmd_data_dir(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;

    if (cmd->server->is_virtual) {
        return "ModSecurity: SecDataDir not allowed in VirtualHost.";
    }

    dcfg->data_dir = ap_server_root_relative(cmd->pool, p1);

    return NULL;
}

/* Handles SecDebugLog: opens the debug log file for append. */
static const char *cmd_debug_log(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    apr_status_t rc;

    dcfg->debuglog_name = ap_server_root_relative(cmd->pool, p1);

    rc = apr_file_open(&dcfg->debuglog_fd, dcfg->debuglog_name,
        APR_WRITE | APR_APPEND | APR_CREATE | APR_BINARY,
        CREATEMODE, cmd->pool);

    if (rc != APR_SUCCESS) {
        return apr_psprintf(cmd->pool, "ModSecurity: Failed to open debug log file: %s",
            dcfg->debuglog_name);
    }

    return NULL;
}

/**
 * \brief Add SecCollectionTimeout configuration option
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On success
 * \retval apr_psprintf On failure (error message)
 */
static const char *cmd_collection_timeout(cmd_parms *cmd, void *_dcfg,
                                          const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;

    dcfg->col_timeout = atoi(p1);
    /* max 30 days */
    if ((dcfg->col_timeout >= 0)&&(dcfg->col_timeout <= 2592000))
        return NULL;

    return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecCollectionTimeout: %s", p1);
}

/* Handles SecDebugLogLevel: accepts 0-9. NOTE(review): atoi() maps garbage
 * input to 0, which is silently accepted — confirm this is intended. */
static const char *cmd_debug_log_level(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;

    dcfg->debuglog_level = atoi(p1);
    if ((dcfg->debuglog_level >= 0)&&(dcfg->debuglog_level <= 9)) return NULL;

    return apr_psprintf(cmd->pool,
        "ModSecurity: Invalid value for SecDebugLogLevel: %s", p1);
}

/* Handles SecDefaultAction: parses p1 into the template actionset that
 * add_rule() merges into every subsequent rule. The directive must carry a
 * disruptive action and a phase, and must not contain metadata, chain, skip
 * or skipAfter actions. */
static const char *cmd_default_action(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    extern msc_engine *modsecurity;
    char *my_error_msg = NULL;

    dcfg->tmp_default_actionset = msre_actionset_create(modsecurity->msre, p1, &my_error_msg);
    if (dcfg->tmp_default_actionset == NULL) {
        if (my_error_msg != NULL) return my_error_msg;
        else return FATAL_ERROR;
    }

    /* Must specify a disruptive action. */
    /* ENH: Remove this requirement? */
    if (dcfg->tmp_default_actionset->intercept_action == NOT_SET) {
        return apr_psprintf(cmd->pool, "ModSecurity: SecDefaultAction must specify a disruptive action.");
    }

    /* Must specify a phase. */
    /* ENH: Remove this requirement? */
    if (dcfg->tmp_default_actionset->phase == NOT_SET) {
        return apr_psprintf(cmd->pool, "ModSecurity: SecDefaultAction must specify a phase.");
    }

    /* Must not use metadata actions. */
    /* ENH: loop through to check for tags */
    if ((dcfg->tmp_default_actionset->id != NOT_SET_P)
        ||(dcfg->tmp_default_actionset->rev != NOT_SET_P)
        ||(dcfg->tmp_default_actionset->version != NOT_SET_P)
        ||(dcfg->tmp_default_actionset->maturity != NOT_SET)
        ||(dcfg->tmp_default_actionset->accuracy != NOT_SET)
        ||(dcfg->tmp_default_actionset->msg != NOT_SET_P))
    {
        return apr_psprintf(cmd->pool, "ModSecurity: SecDefaultAction must not "
            "contain any metadata actions (id, rev, msg, tag, severity, ver, accuracy, maturity, logdata).");
    }

    /* These are just a warning for now.
     */
    if ((dcfg->tmp_default_actionset->severity != NOT_SET)
        ||(dcfg->tmp_default_actionset->logdata != NOT_SET_P))
    {
        ap_log_perror(APLOG_MARK,
            APLOG_STARTUP|APLOG_WARNING|APLOG_NOERRNO, 0, cmd->pool,
            "ModSecurity: WARNING Using \"severity\" or \"logdata\" in "
            "SecDefaultAction is deprecated (%s:%d).",
            cmd->directive->filename, cmd->directive->line_num);
    }

    /* Transformations are deprecated here as well — warn only. */
    if (apr_table_get(dcfg->tmp_default_actionset->actions, "t")) {
        ap_log_perror(APLOG_MARK,
            APLOG_STARTUP|APLOG_WARNING|APLOG_NOERRNO, 0, cmd->pool,
            "ModSecurity: WARNING Using transformations in "
            "SecDefaultAction is deprecated (%s:%d).",
            cmd->directive->filename, cmd->directive->line_num);
    }

    /* Must not use chain. */
    if (dcfg->tmp_default_actionset->is_chained != NOT_SET) {
        return apr_psprintf(cmd->pool, "ModSecurity: SecDefaultAction must not "
            "contain a chain action.");
    }

    /* Must not use skip. */
    if (dcfg->tmp_default_actionset->skip_count != NOT_SET) {
        return apr_psprintf(cmd->pool, "ModSecurity: SecDefaultAction must not "
            "contain a skip action.");
    }

    /* Must not use skipAfter. */
    if (dcfg->tmp_default_actionset->skip_after != NOT_SET_P) {
        return apr_psprintf(cmd->pool, "ModSecurity: SecDefaultAction must not "
            "contain a skipAfter action.");
    }

    return NULL;
}

/* Handles SecDisableBackendCompression (on/off flag). */
static const char *cmd_disable_backend_compression(cmd_parms *cmd, void *_dcfg, int flag)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;
    dcfg->disable_backend_compression = flag;
    return NULL;
}

/* Handles SecGuardianLog (global, main server only): p1 names a pipe (leading
 * '|') or file, optional p2 is an "env=VAR" condition clause. */
static const char *cmd_guardian_log(cmd_parms *cmd, void *_dcfg,
    const char *p1, const char *p2)
{
    extern char *guardianlog_name;
    extern apr_file_t *guardianlog_fd;
    extern char *guardianlog_condition;

    if (cmd->server->is_virtual) {
        return "ModSecurity: SecGuardianLog not allowed in VirtualHost";
    }

    if (p2 != NULL) {
        if (strncmp(p2, "env=", 4) != 0) {
            return "ModSecurity: Error in condition clause";
        }
        if ( (p2[4] == '\0') || ((p2[4] == '!')&&(p2[5] == '\0')) ) {
            return "ModSecurity: Missing variable name";
        }
        guardianlog_condition = apr_pstrdup(cmd->pool, p2 + 4);
    }

    guardianlog_name = (char *)p1;

    if (guardianlog_name[0] == '|') {
        const char *pipe_name = ap_server_root_relative(cmd->pool, guardianlog_name + 1);
        piped_log *pipe_log;

        pipe_log = ap_open_piped_log(cmd->pool, pipe_name);
        if (pipe_log == NULL) {
            return apr_psprintf(cmd->pool, "ModSecurity: Failed to open the guardian log pipe: %s",
                pipe_name);
        }
        guardianlog_fd = ap_piped_log_write_fd(pipe_log);
    }
    else {
        const char *file_name = ap_server_root_relative(cmd->pool, guardianlog_name);
        apr_status_t rc;

        rc = apr_file_open(&guardianlog_fd, file_name,
            APR_WRITE | APR_APPEND | APR_CREATE | APR_BINARY,
            CREATEMODE, cmd->pool);

        if (rc != APR_SUCCESS) {
            return apr_psprintf(cmd->pool, "ModSecurity: Failed to open the guardian log file: %s",
                file_name);
        }
    }

    return NULL;
}

/**
 * \brief Add SecStreamInBodyInspection configuration option
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On failure
 * \retval
apr_psprintf On Success
 */
static const char *cmd_stream_inbody_inspection(cmd_parms *cmd, void *_dcfg, int flag)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;
    dcfg->stream_inbody_inspection = flag;
    return NULL;
}

/**
 * \brief Add SecStreamOutBodyInspection configuration option
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On success
 */
static const char *cmd_stream_outbody_inspection(cmd_parms *cmd, void *_dcfg, int flag)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;
    dcfg->stream_outbody_inspection = flag;
    return NULL;
}

/**
 * \brief Add SecRulePerfTime configuration option (threshold, in microseconds
 * per the directive's usage elsewhere — TODO confirm unit)
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On success
 * \retval apr_psprintf On failure (error message)
 */
static const char *cmd_rule_perf_time(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    long int limit;

    if (dcfg == NULL) return NULL;

    limit = strtol(p1, NULL, 10);
    if ((limit == LONG_MAX)||(limit == LONG_MIN)||(limit <= 0)) {
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecRulePerfTime: %s", p1);
    }

    dcfg->max_rule_time = limit;

    return NULL;
}

/**
 * \brief Add SecReadStateLimit configuration option (global connection limit)
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On success
 * \retval apr_psprintf On failure (error message)
 */
static const char *cmd_conn_read_state_limit(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    long int limit;

    if (dcfg == NULL) return NULL;

    limit = strtol(p1, NULL, 10);
    if ((limit == LONG_MAX)||(limit == LONG_MIN)||(limit <= 0)) {
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecReadStateLimit: %s", p1);
    }

    /* Written to a module-global, not the directory config. */
    conn_read_state_limit = limit;

    return NULL;
}

/**
 * \brief Add SecWriteStateLimit configuration option (global connection limit)
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On success
 * \retval apr_psprintf On failure (error message)
 */
static const char *cmd_conn_write_state_limit(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    long int limit;

    if (dcfg == NULL) return NULL;

    limit = strtol(p1, NULL, 10);
    if ((limit == LONG_MAX)||(limit == LONG_MIN)||(limit <= 0)) {
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecWriteStateLimit: %s", p1);
    }

    /* Written to a module-global, not the directory config. */
    conn_write_state_limit = limit;

    return NULL;
}

/* Handles SecRequestBodyInMemoryLimit: bytes buffered in RAM before spooling. */
static const char *cmd_request_body_inmemory_limit(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    long int limit;

    if (dcfg == NULL) return NULL;

    limit = strtol(p1, NULL, 10);
    if ((limit == LONG_MAX)||(limit == LONG_MIN)||(limit <= 0)) {
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecRequestBodyInMemoryLimit: %s", p1);
    }

    dcfg->reqbody_inmemory_limit = limit;

    return NULL;
}

/* Handles SecRequestBodyLimit: maximum accepted request body size in bytes. */
static const char *cmd_request_body_limit(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    long int limit;

    if (dcfg == NULL) return NULL;

    limit = strtol(p1, NULL, 10);
    if ((limit == LONG_MAX)||(limit == LONG_MIN)||(limit <= 0)) {
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecRequestBodyLimit: %s", p1);
    }

    dcfg->reqbody_limit = limit;

    return NULL;
}

/* Handles SecRequestBodyNoFilesLimit: body size limit excluding uploaded files. */
static const char *cmd_request_body_no_files_limit(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    long int limit;

    if (dcfg == NULL) return NULL;

    limit = strtol(p1, NULL, 10);
    if ((limit == LONG_MAX)||(limit == LONG_MIN)||(limit <= 0)) {
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecRequestBodyNoFilesLimit: %s", p1);
    }

    dcfg->reqbody_no_files_limit = limit;

    return NULL;
}

/* Handles SecRequestBodyAccess (on/off). */
static const char *cmd_request_body_access(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;

    if (strcasecmp(p1, "on") == 0) dcfg->reqbody_access = 1;
    else
    if (strcasecmp(p1, "off") == 0) dcfg->reqbody_access = 0;
    else
    return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecRequestBodyAccess: %s", p1);

    return NULL;
}

/**
 * \brief Add SecInterceptOnError configuration option
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On success
 * \retval apr_psprintf On failure (error message)
 */
static const char *cmd_request_intercept_on_error(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;

    if (strcasecmp(p1, "on") == 0) dcfg->reqintercept_oe = 1;
    else
    if (strcasecmp(p1, "off") == 0) dcfg->reqintercept_oe = 0;
    else
    return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecInterceptOnError: %s", p1);

    return NULL;
}

/* Handles SecRequestEncoding: stores the declared request character encoding. */
static const char *cmd_request_encoding(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;

    /* ENH Validate encoding */

    dcfg->request_encoding = p1;

    return NULL;
}

/* Handles SecResponseBodyAccess (on/off). */
static const char *cmd_response_body_access(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;

    if (strcasecmp(p1, "on") == 0) dcfg->resbody_access = 1;
    else
    if (strcasecmp(p1, "off") == 0) dcfg->resbody_access = 0;
    else
    return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecResponseBodyAccess: %s", p1);

    return NULL;
}

/* Handles SecResponseBodyLimit: capped by RESPONSE_BODY_HARD_LIMIT. */
static const char *cmd_response_body_limit(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    long int
limit; limit = strtol(p1, NULL, 10); if ((limit == LONG_MAX)||(limit == LONG_MIN)||(limit <= 0)) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecResponseBodyLimit: %s", p1); } if (limit > RESPONSE_BODY_HARD_LIMIT) { return apr_psprintf(cmd->pool, "ModSecurity: Response size limit can not exceed the hard limit: %li", RESPONSE_BODY_HARD_LIMIT); } dcfg->of_limit = limit; return NULL; } static const char *cmd_response_body_limit_action(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (dcfg->is_enabled == MODSEC_DETECTION_ONLY) { dcfg->of_limit_action = RESPONSE_BODY_LIMIT_ACTION_PARTIAL; return NULL; } if (strcasecmp(p1, "ProcessPartial") == 0) dcfg->of_limit_action = RESPONSE_BODY_LIMIT_ACTION_PARTIAL; else if (strcasecmp(p1, "Reject") == 0) dcfg->of_limit_action = RESPONSE_BODY_LIMIT_ACTION_REJECT; else return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecResponseBodyLimitAction: %s", p1); return NULL; } /** * \brief Add SecRequestBodyLimitAction configuration option * * \param cmd Pointer to configuration data * \param _dcfg Pointer to directory configuration * \param p1 Pointer to configuration option * * \retval NULL On failure * \retval apr_psprintf On success */ static const char *cmd_resquest_body_limit_action(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (dcfg->is_enabled == MODSEC_DETECTION_ONLY) { dcfg->if_limit_action = REQUEST_BODY_LIMIT_ACTION_PARTIAL; return NULL; } if (strcasecmp(p1, "ProcessPartial") == 0) dcfg->if_limit_action = REQUEST_BODY_LIMIT_ACTION_PARTIAL; else if (strcasecmp(p1, "Reject") == 0) dcfg->if_limit_action = REQUEST_BODY_LIMIT_ACTION_REJECT; else return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecRequestBodyLimitAction: %s", p1); return NULL; } static const char *cmd_response_body_mime_type(cmd_parms *cmd, void *_dcfg, 
const char *_p1)
{
    /* Implements SecResponseBodyMimeType: adds one MIME type to the set of
     * response content types whose bodies will be buffered for inspection. */
    directory_config *dcfg = (directory_config *)_dcfg;
    /* Work on a pool copy: the table stores the key lowercased in place. */
    char *p1 = apr_pstrdup(cmd->pool, _p1);

    /* TODO check whether the parameter is a valid MIME type of "???" */

    if ((dcfg->of_mime_types == NULL)||(dcfg->of_mime_types == NOT_SET_P)) {
        dcfg->of_mime_types = apr_table_make(cmd->pool, 10);
    }

    strtolower_inplace((unsigned char *)p1);
    apr_table_setn(dcfg->of_mime_types, p1, "1");

    return NULL;
}

/* Implements SecResponseBodyMimeTypesClear: empties the buffered-MIME-type
 * table and records the explicit clear (consulted during config merging). */
static const char *cmd_response_body_mime_types_clear(cmd_parms *cmd,
    void *_dcfg)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;

    dcfg->of_mime_types_cleared = 1;

    if ((dcfg->of_mime_types != NULL)&&(dcfg->of_mime_types != NOT_SET_P)) {
        apr_table_clear(dcfg->of_mime_types);
    }

    return NULL;
}

/**
 * \brief Add SecRuleUpdateTargetById
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 * \param p2 Pointer to configuration option
 * \param p3 Pointer to configuration option
 *
 * \retval NULL On failure|Success
 */
static const char *cmd_rule_update_target_by_id(cmd_parms *cmd, void *_dcfg,
    const char *p1, const char *p2, const char *p3)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    /* NOTE(review): allocated and typed as a rule_exception — the exception
     * matching machinery is reused here to select the rules whose targets
     * get updated, not to remove them. */
    rule_exception *re = apr_pcalloc(cmd->pool, sizeof(rule_exception));

    if (dcfg == NULL) return NULL;

    if(p1 == NULL)  {
        return apr_psprintf(cmd->pool, "Updating target by ID with no ID");
    }

    re->type = RULE_EXCEPTION_REMOVE_ID;

    /* TODO: Validate the range here, while we can still tell the user if it's invalid */
    re->param = p1;

    return msre_ruleset_rule_update_target_matching_exception(NULL, dcfg->ruleset, re, p2, p3);
}

/**
 * \brief Add SecRuleUpdateTargetByTag configuration option
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option RULETAG
 * \param p2 Pointer to configuration option TARGET
 * \param p3 Pointer to configuration option REPLACED_TARGET
 * \todo Finish documenting
 *
 * 
\retval NULL On success * \retval apr_psprintf On failure * * \todo Figure out error checking */ static const char *cmd_rule_update_target_by_tag(cmd_parms *cmd, void *_dcfg, const char *p1, const char *p2, const char *p3) { directory_config *dcfg = (directory_config *)_dcfg; rule_exception *re = apr_pcalloc(cmd->pool, sizeof(rule_exception)); if (dcfg == NULL) return NULL; if(p1 == NULL) { return apr_psprintf(cmd->pool, "Updating target by tag with no tag"); } re->type = RULE_EXCEPTION_REMOVE_TAG; re->param = p1; re->param_data = msc_pregcomp(cmd->pool, p1, 0, NULL, NULL); if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p1); } return msre_ruleset_rule_update_target_matching_exception(NULL, dcfg->ruleset, re, p2, p3); } /** * \brief Add SecRuleUpdateTargetByMsg configuration option * * \param cmd Pointer to configuration data * \param _dcfg Pointer to directory configuration * \param p1 Pointer to configuration option RULEMSG * \param p2 Pointer to configuration option TARGET * \param p3 Pointer to configuration option REPLACED_TARGET * \todo Finish documenting * * \retval NULL On success * \retval apr_psprintf On failure * * \todo Figure out error checking */ static const char *cmd_rule_update_target_by_msg(cmd_parms *cmd, void *_dcfg, const char *p1, const char *p2, const char *p3) { directory_config *dcfg = (directory_config *)_dcfg; rule_exception *re = apr_pcalloc(cmd->pool, sizeof(rule_exception)); if (dcfg == NULL) return NULL; if(p1 == NULL) { return apr_psprintf(cmd->pool, "Updating target by message with no message"); } re->type = RULE_EXCEPTION_REMOVE_MSG; re->param = p1; re->param_data = msc_pregcomp(cmd->pool, p1, 0, NULL, NULL); if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p1); } return msre_ruleset_rule_update_target_matching_exception(NULL, dcfg->ruleset, re, p2, p3); } static const char *cmd_rule(cmd_parms *cmd, void *_dcfg, 
const char *p1, const char *p2, const char *p3)
{
    /* Implements SecRule: target list, operator and optional action list are
     * handed to add_rule() as a normal rule. */
    return add_rule(cmd, (directory_config *)_dcfg, RULE_TYPE_NORMAL, p1, p2, p3);
}

/* Implements SecRuleEngine (On|Off|DetectionOnly). DetectionOnly also forces
 * both body-limit actions to ProcessPartial so requests are never blocked. */
static const char *cmd_rule_engine(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;

    if (strcasecmp(p1, "on") == 0)
        dcfg->is_enabled = MODSEC_ENABLED;
    else if (strcasecmp(p1, "off") == 0)
        dcfg->is_enabled = MODSEC_DISABLED;
    else if (strcasecmp(p1, "detectiononly") == 0) {
        dcfg->is_enabled = MODSEC_DETECTION_ONLY;
        dcfg->of_limit_action = RESPONSE_BODY_LIMIT_ACTION_PARTIAL;
        dcfg->if_limit_action = REQUEST_BODY_LIMIT_ACTION_PARTIAL;
    }
    else
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecRuleEngine: %s", p1);

    return NULL;
}

/* Implements SecRuleInheritance (flag): whether rules are inherited from the
 * parent configuration context. */
static const char *cmd_rule_inheritance(cmd_parms *cmd, void *_dcfg, int flag)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;
    dcfg->rule_inheritance = flag;
    return NULL;
}

/* Implements SecRuleScript: registers a Lua rule script (path resolved
 * relative to the including configuration file). Without Lua support the
 * directive is logged and ignored. */
static const char *cmd_rule_script(cmd_parms *cmd, void *_dcfg, const char *p1, const char *p2)
{
#if defined(WITH_LUA)
    const char *filename = resolve_relative_path(cmd->pool, cmd->directive->filename, p1);
    return add_rule(cmd, (directory_config *)_dcfg, RULE_TYPE_LUA, filename, p2, NULL);
#else
    ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool,
        "Ignoring SecRuleScript \"%s\" directive (%s:%d): No Lua scripting support.", p1,
        cmd->directive->filename, cmd->directive->line_num);
    return NULL;
#endif
}

/* Implements SecRuleRemoveById: records a remove-by-ID exception and removes
 * matching rules from the current context immediately. */
static const char *cmd_rule_remove_by_id(cmd_parms *cmd, void *_dcfg, const char *p1)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    rule_exception *re = apr_pcalloc(cmd->pool, sizeof(rule_exception));
    if (dcfg == NULL) return NULL;

    re->type = RULE_EXCEPTION_REMOVE_ID;
    re->param = p1;
    *(rule_exception **)apr_array_push(dcfg->rule_exceptions) = re;

    /* Remove the corresponding rules from the context straight away.
*/ msre_ruleset_rule_remove_with_exception(dcfg->ruleset, re); return NULL; } /** * \brief Add SecRuleRemoveByTag configuration option * * \param cmd Pointer to configuration data * \param _dcfg Pointer to directory configuration * \param p1 Pointer to configuration option * * \retval NULL On failure * \retval apr_psprintf On success */ static const char *cmd_rule_remove_by_tag(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; rule_exception *re = apr_pcalloc(cmd->pool, sizeof(rule_exception)); if (dcfg == NULL) return NULL; re->type = RULE_EXCEPTION_REMOVE_TAG; re->param = p1; re->param_data = msc_pregcomp(cmd->pool, p1, 0, NULL, NULL); if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p1); } *(rule_exception **)apr_array_push(dcfg->rule_exceptions) = re; /* Remove the corresponding rules from the context straight away. */ msre_ruleset_rule_remove_with_exception(dcfg->ruleset, re); #ifdef DEBUG_CONF ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool, "Added exception %pp (%d %s) to dcfg %pp.", re, re->type, re->param, dcfg); #endif return NULL; } static const char *cmd_rule_remove_by_msg(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; rule_exception *re = apr_pcalloc(cmd->pool, sizeof(rule_exception)); if (dcfg == NULL) return NULL; re->type = RULE_EXCEPTION_REMOVE_MSG; re->param = p1; re->param_data = msc_pregcomp(cmd->pool, p1, 0, NULL, NULL); if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p1); } *(rule_exception **)apr_array_push(dcfg->rule_exceptions) = re; /* Remove the corresponding rules from the context straight away. 
*/ msre_ruleset_rule_remove_with_exception(dcfg->ruleset, re); #ifdef DEBUG_CONF ap_log_perror(APLOG_MARK, APLOG_STARTUP|APLOG_NOERRNO, 0, cmd->pool, "Added exception %pp (%d %s) to dcfg %pp.", re, re->type, re->param, dcfg); #endif return NULL; } static const char *cmd_rule_update_action_by_id(cmd_parms *cmd, void *_dcfg, const char *p1, const char *p2) { int offset = 0, rule_id = atoi(p1); char *opt = strchr(p1,':'); char *savedptr = NULL; char *param = apr_pstrdup(cmd->pool, p1); if ((rule_id == LONG_MAX)||(rule_id == LONG_MIN)||(rule_id <= 0)) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for ID for update action: %s", p1); } if(opt != NULL) { opt++; offset = atoi(opt); opt = apr_strtok(param,":", &savedptr); return update_rule_action(cmd, (directory_config *)_dcfg, (const char *)opt, p2, offset); } return update_rule_action(cmd, (directory_config *)_dcfg, p1, p2, offset); } static const char *cmd_server_signature(cmd_parms *cmd, void *_dcfg, const char *p1) { if (cmd->server->is_virtual) { return "ModSecurity: SecServerSignature not allowed in VirtualHost"; } new_server_signature = (char *)p1; return NULL; } static const char *cmd_tmp_dir(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (strcasecmp(p1, "none") == 0) dcfg->tmp_dir = NULL; else dcfg->tmp_dir = ap_server_root_relative(cmd->pool, p1); return NULL; } static const char *cmd_upload_dir(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (strcasecmp(p1, "none") == 0) dcfg->upload_dir = NULL; else dcfg->upload_dir = ap_server_root_relative(cmd->pool, p1); return NULL; } static const char *cmd_upload_file_limit(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (strcasecmp(p1, "default") == 0) { dcfg->upload_file_limit = NOT_SET; } else { 
dcfg->upload_file_limit = atoi(p1); } return NULL; } static const char *cmd_upload_filemode(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (strcasecmp(p1, "default") == 0) { dcfg->upload_filemode = NOT_SET; } else { long int mode = strtol(p1, NULL, 8); /* expects octal mode */ if ((mode == LONG_MAX)||(mode == LONG_MIN)||(mode <= 0)||(mode > 07777)) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecUploadFileMode: %s", p1); } dcfg->upload_filemode = (int)mode; } return NULL; } static const char *cmd_upload_keep_files(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (strcasecmp(p1, "on") == 0) { dcfg->upload_keep_files = KEEP_FILES_ON; } else if (strcasecmp(p1, "off") == 0) { dcfg->upload_keep_files = KEEP_FILES_OFF; } else if (strcasecmp(p1, "relevantonly") == 0) { dcfg->upload_keep_files = KEEP_FILES_RELEVANT_ONLY; } else { return apr_psprintf(cmd->pool, "ModSecurity: Invalid setting for SecUploadKeepFiles: %s", p1); } return NULL; } static const char *cmd_web_app_id(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; /* ENH enforce format (letters, digits, ., _, -) */ dcfg->webappid = p1; return NULL; } static const char *cmd_sensor_id(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; /* ENH enforce format (letters, digits, ., _, -) */ dcfg->sensor_id = p1; return NULL; } /** * \brief Add SecHash configuration option * * \param cmd Pointer to configuration data * \param _dcfg Pointer to directory configuration * \param p1 Pointer to configuration option * * \retval NULL On failure * \retval apr_psprintf On Success */ static const char *cmd_hash_engine(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if 
(strcasecmp(p1, "on") == 0) { dcfg->hash_is_enabled = HASH_ENABLED; dcfg->hash_enforcement = HASH_ENABLED; } else if (strcasecmp(p1, "off") == 0) { dcfg->hash_is_enabled = HASH_DISABLED; dcfg->hash_enforcement = HASH_DISABLED; } else return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecRuleEngine: %s", p1); return NULL; } /** * \brief Add SecHashPram configuration option * * \param cmd Pointer to configuration data * \param _dcfg Pointer to directory configuration * \param p1 Pointer to configuration option * * \retval NULL On success */ static const char *cmd_hash_param(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (p1 == NULL) return NULL; dcfg->crypto_param_name = p1; return NULL; } /** * \brief Add SecHashKey configuration option * * \param cmd Pointer to configuration data * \param _dcfg Pointer to directory configuration * \param _p1 Pointer to configuration option * \param _p2 Pointer to configuration option * * \retval NULL On success */ static const char *cmd_hash_key(cmd_parms *cmd, void *_dcfg, const char *_p1, const char *_p2) { directory_config *dcfg = (directory_config *)_dcfg; char *p1 = NULL; if (dcfg == NULL) return NULL; if (_p1 == NULL) return NULL; if (strcasecmp(_p1, "Rand") == 0) { p1 = apr_pstrdup(cmd->pool, getkey(cmd->pool)); dcfg->crypto_key = p1; dcfg->crypto_key_len = strlen(dcfg->crypto_key); } else { p1 = apr_pstrdup(cmd->pool, _p1); dcfg->crypto_key = p1; dcfg->crypto_key_len = strlen(p1); } if(_p2 == NULL) { return NULL; } else { if (strcasecmp(_p2, "KeyOnly") == 0) dcfg->crypto_key_add = HASH_KEYONLY; else if (strcasecmp(_p2, "SessionID") == 0) dcfg->crypto_key_add = HASH_SESSIONID; else if (strcasecmp(_p2, "RemoteIP") == 0) dcfg->crypto_key_add = HASH_REMOTEIP; } return NULL; } /** * \brief Add SecHashMethodPm configuration option * * \param cmd Pointer to configuration data * \param _dcfg Pointer to directory configuration * \param 
p1 Pointer to configuration option
 * \param p2 Pointer to configuration option
 *
 * \retval NULL On failure
 * \retval apr_psprintf On Success
 */
static const char *cmd_hash_method_pm(cmd_parms *cmd, void *_dcfg,
    const char *p1, const char *p2)
{
    directory_config *dcfg = (directory_config *)_dcfg;
    /* NOTE(review): typed rule_exception but allocated with
     * sizeof(hash_method) and stored into dcfg->hash_method below — the two
     * layouts are presumably compatible; confirm against their definitions. */
    rule_exception *re = apr_pcalloc(cmd->pool, sizeof(hash_method));
    const char *_p2 = apr_pstrdup(cmd->pool, p2);
    ACMP *p = NULL;
    const char *phrase = NULL;
    const char *next = NULL;

    if (dcfg == NULL) return NULL;

    p = acmp_create(0, cmd->pool);
    if (p == NULL) return NULL;

    /* phrase is always NULL at this point, so the guard is redundant; this
     * is effectively a plain assignment of a pool copy of p2. */
    if(phrase == NULL)
        phrase = apr_pstrdup(cmd->pool, _p2);

    /* Split the whitespace-separated pattern list and feed each token into
     * the multi-pattern matcher. */
    for (;;) {
        while((apr_isspace(*phrase) != 0) && (*phrase != '\0')) phrase++;
        if (*phrase == '\0') break;
        next = phrase;
        while((apr_isspace(*next) == 0) && (*next != 0)) next++;
        acmp_add_pattern(p, phrase, NULL, NULL, next - phrase);
        phrase = next;
    }

    acmp_prepare(p);

    if (strcasecmp(p1, "HashHref") == 0) {
        re->type = HASH_URL_HREF_HASH_PM;
        re->param = _p2;
        re->param_data = (void *)p;
        /* p was verified non-NULL above, so this branch is unreachable. */
        if (re->param_data == NULL) {
            return apr_psprintf(cmd->pool, "ModSecurity: Invalid pattern: %s", p2);
        }
        dcfg->crypto_hash_href_pm = 1;
    }
    else if (strcasecmp(p1, "HashFormAction") == 0) {
        re->type = HASH_URL_FACTION_HASH_PM;
        re->param = _p2;
        re->param_data = (void *)p;
        /* Unreachable: p checked above. */
        if (re->param_data == NULL) {
            return apr_psprintf(cmd->pool, "ModSecurity: Invalid pattern: %s", p2);
        }
        dcfg->crypto_hash_faction_pm = 1;
    }
    else if (strcasecmp(p1, "HashLocation") == 0) {
        re->type = HASH_URL_LOCATION_HASH_PM;
        re->param = _p2;
        re->param_data = (void *)p;
        /* Unreachable: p checked above. */
        if (re->param_data == NULL) {
            return apr_psprintf(cmd->pool, "ModSecurity: Invalid pattern: %s", p2);
        }
        dcfg->crypto_hash_location_pm = 1;
    }
    else if (strcasecmp(p1, "HashIframeSrc") == 0) {
        re->type = HASH_URL_IFRAMESRC_HASH_PM;
        re->param = _p2;
        re->param_data = (void *)p;
        /* Unreachable: p checked above. */
        if (re->param_data == NULL) {
            return apr_psprintf(cmd->pool, "ModSecurity: Invalid pattern: %s", p2);
        }
        dcfg->crypto_hash_iframesrc_pm = 1;
    }
    else if (strcasecmp(p1,
"HashFrameSrc") == 0) { re->type = HASH_URL_FRAMESRC_HASH_PM; re->param = _p2; re->param_data = (void *)p; if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid pattern: %s", p2); } dcfg->crypto_hash_framesrc_pm = 1; } *(hash_method **)apr_array_push(dcfg->hash_method) = re; return NULL; } /** * \brief Add SecHashMethodRx configuration option * * \param cmd Pointer to configuration data * \param _dcfg Pointer to directory configuration * \param p1 Pointer to configuration option * \param p2 Pointer to configuration option * * \retval NULL On failure * \retval apr_psprintf On Success */ static const char *cmd_hash_method_rx(cmd_parms *cmd, void *_dcfg, const char *p1, const char *p2) { directory_config *dcfg = (directory_config *)_dcfg; rule_exception *re = apr_pcalloc(cmd->pool, sizeof(hash_method)); const char *_p2 = apr_pstrdup(cmd->pool, p2); if (dcfg == NULL) return NULL; if (strcasecmp(p1, "HashHref") == 0) { re->type = HASH_URL_HREF_HASH_RX; re->param = _p2; re->param_data = msc_pregcomp(cmd->pool, p2, 0, NULL, NULL); if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p2); } dcfg->crypto_hash_href_rx = 1; } else if (strcasecmp(p1, "HashFormAction") == 0) { re->type = HASH_URL_FACTION_HASH_RX; re->param = _p2; re->param_data = msc_pregcomp(cmd->pool, p2, 0, NULL, NULL); if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p2); } dcfg->crypto_hash_faction_rx = 1; } else if (strcasecmp(p1, "HashLocation") == 0) { re->type = HASH_URL_LOCATION_HASH_RX; re->param = _p2; re->param_data = msc_pregcomp(cmd->pool, p2, 0, NULL, NULL); if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p2); } dcfg->crypto_hash_location_rx = 1; } else if (strcasecmp(p1, "HashIframeSrc") == 0) { re->type = HASH_URL_IFRAMESRC_HASH_RX; re->param = _p2; re->param_data = msc_pregcomp(cmd->pool, p2, 
0, NULL, NULL); if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p2); } dcfg->crypto_hash_iframesrc_rx = 1; } else if (strcasecmp(p1, "HashFrameSrc") == 0) { re->type = HASH_URL_FRAMESRC_HASH_RX; re->param = _p2; re->param_data = msc_pregcomp(cmd->pool, p2, 0, NULL, NULL); if (re->param_data == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid regular expression: %s", p2); } dcfg->crypto_hash_framesrc_rx = 1; } *(hash_method **)apr_array_push(dcfg->hash_method) = re; return NULL; } /** * \brief Add SecHttpBlKey configuration option * * \param cmd Pointer to configuration data * \param _dcfg Pointer to directory configuration * \param p1 Pointer to configuration option * * \retval NULL On success */ static const char *cmd_httpBl_key(cmd_parms *cmd, void *_dcfg, const char *p1) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (p1 == NULL) return NULL; dcfg->httpBlkey = p1; return NULL; } /* PCRE Limits */ static const char *cmd_pcre_match_limit(cmd_parms *cmd, void *_dcfg, const char *p1) { long val; if (cmd->server->is_virtual) { return "ModSecurity: SecPcreMatchLimit not allowed in VirtualHost"; } val = atol(p1); if (val <= 0) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid setting for " "SecPcreMatchLimit: %s", p1); } msc_pcre_match_limit = (unsigned long int)val; return NULL; } static const char *cmd_pcre_match_limit_recursion(cmd_parms *cmd, void *_dcfg, const char *p1) { long val; if (cmd->server->is_virtual) { return "ModSecurity: SecPcreMatchLimitRecursion not allowed in VirtualHost"; } val = atol(p1); if (val <= 0) { return apr_psprintf(cmd->pool, "ModSecurity: Invalid setting for " "SecPcreMatchLimitRecursion: %s", p1); } msc_pcre_match_limit_recursion = (unsigned long int)val; return NULL; } /* -- Geo Lookup configuration -- */ static const char *cmd_geo_lookup_db(cmd_parms *cmd, void *_dcfg, const char *p1) { const char *filename = 
resolve_relative_path(cmd->pool, cmd->directive->filename, p1);
    char *error_msg;
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;

    /* geo_init reports failures through error_msg (pool-allocated). */
    if (geo_init(dcfg, filename, &error_msg) <= 0) {
        return error_msg;
    }

    return NULL;
}

/**
 * \brief Add SecUnicodeCodePage configuration option
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On success
 */
static const char *cmd_unicode_codepage(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    long val;

    /* NOTE(review): atol() gives no overflow indication; only <= 0 is
     * rejected here. */
    val = atol(p1);
    if (val <= 0) {
        return apr_psprintf(cmd->pool, "ModSecurity: Invalid setting for "
                "SecUnicodeCodePage: %s", p1);
    }

    /* Stored in a module-global, not in dcfg. */
    unicode_codepage = (unsigned long int)val;

    return NULL;
}

/**
 * \brief Add SecUnicodeMapFile configuration option
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On success
 */
static const char *cmd_unicode_map(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    /* Path resolved relative to the including configuration file. */
    const char *filename = resolve_relative_path(cmd->pool, cmd->directive->filename, p1);
    char *error_msg;
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;

    /* unicode_map_init reports failures through error_msg. */
    if (unicode_map_init(dcfg, filename, &error_msg) <= 0) {
        return error_msg;
    }

    return NULL;
}

/**
 * \brief Add SecGsbLookupDb configuration option
 *
 * \param cmd Pointer to configuration data
 * \param _dcfg Pointer to directory configuration
 * \param p1 Pointer to configuration option
 *
 * \retval NULL On success
 */
static const char *cmd_gsb_lookup_db(cmd_parms *cmd, void *_dcfg,
    const char *p1)
{
    /* Path resolved relative to the including configuration file. */
    const char *filename = resolve_relative_path(cmd->pool, cmd->directive->filename, p1);
    char *error_msg;
    directory_config *dcfg = (directory_config *)_dcfg;
    if (dcfg == NULL) return NULL;

    /* gsb_db_init reports failures through error_msg. */
    if (gsb_db_init(dcfg, filename, &error_msg) <= 0) {
        return error_msg;
    }

    return NULL;
}

/* -- Cache -- */

static const char
*cmd_cache_transformations(cmd_parms *cmd, void *_dcfg, const char *p1, const char *p2) { directory_config *dcfg = (directory_config *)_dcfg; if (dcfg == NULL) return NULL; if (strcasecmp(p1, "on") == 0) dcfg->cache_trans = MODSEC_CACHE_ENABLED; else if (strcasecmp(p1, "off") == 0) dcfg->cache_trans = MODSEC_CACHE_DISABLED; else return apr_psprintf(cmd->pool, "ModSecurity: Invalid value for SecCacheTransformations: %s", p1); /* Process options */ if (p2 != NULL) { apr_table_t *vartable = apr_table_make(cmd->pool, 4); apr_status_t rc; char *error_msg = NULL; const char *charval = NULL; apr_int64_t intval = 0; if (vartable == NULL) { return apr_psprintf(cmd->pool, "ModSecurity: Unable to process options for SecCacheTransformations"); } rc = msre_parse_generic(cmd->pool, p2, vartable, &error_msg); if (rc < 0) { return apr_psprintf(cmd->pool, "ModSecurity: Unable to parse options for SecCacheTransformations: %s", error_msg); } /* incremental */ charval = apr_table_get(vartable, "incremental"); if (charval != NULL) { if (strcasecmp(charval, "on") == 0) dcfg->cache_trans_incremental = 1; else if (strcasecmp(charval, "off") == 0) dcfg->cache_trans_incremental = 0; else return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations invalid incremental value: %s", charval); } /* minlen */ charval = apr_table_get(vartable, "minlen"); if (charval != NULL) { intval = apr_atoi64(charval); if (errno == ERANGE) { return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations minlen out of range: %s", charval); } if (intval < 0) { return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations minlen must be positive: %s", charval); } /* The NOT_SET indicator is -1, a signed long, and therfore * we cannot be >= the unsigned value of NOT_SET. 
*/ if ((unsigned long)intval >= (unsigned long)NOT_SET) { return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations minlen must be less than: %lu", (unsigned long)NOT_SET); } dcfg->cache_trans_min = (apr_size_t)intval; } /* maxlen */ charval = apr_table_get(vartable, "maxlen"); if (charval != NULL) { intval = apr_atoi64(charval); if (errno == ERANGE) { return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations maxlen out of range: %s", charval); } if (intval < 0) { return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations maxlen must be positive: %s", charval); } /* The NOT_SET indicator is -1, a signed long, and therfore * we cannot be >= the unsigned value of NOT_SET. */ if ((unsigned long)intval >= (unsigned long)NOT_SET) { return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations maxlen must be less than: %lu", (unsigned long)NOT_SET); } if ((intval != 0) && ((apr_size_t)intval < dcfg->cache_trans_min)) { return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations maxlen must not be less than minlen: %lu < %" APR_SIZE_T_FMT, (unsigned long)intval, dcfg->cache_trans_min); } dcfg->cache_trans_max = (apr_size_t)intval; } /* maxitems */ charval = apr_table_get(vartable, "maxitems"); if (charval != NULL) { intval = apr_atoi64(charval); if (errno == ERANGE) { return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations maxitems out of range: %s", charval); } if (intval < 0) { return apr_psprintf(cmd->pool, "ModSecurity: SecCacheTransformations maxitems must be positive: %s", charval); } dcfg->cache_trans_maxitems = (apr_size_t)intval; } } return NULL; } /* -- Configuration directives definitions -- */ #define CMD_SCOPE_MAIN (RSRC_CONF) #define CMD_SCOPE_ANY (RSRC_CONF | ACCESS_CONF) #if defined(HTACCESS_CONFIG) #define CMD_SCOPE_HTACCESS (OR_OPTIONS) #endif const command_rec module_directives[] = { #ifdef HTACCESS_CONFIG AP_INIT_TAKE1 ( "SecAction", cmd_action, NULL, CMD_SCOPE_HTACCESS, "an action list" ), 
#else AP_INIT_TAKE1 ( "SecAction", cmd_action, NULL, CMD_SCOPE_ANY, "an action list" ), #endif AP_INIT_TAKE1 ( "SecArgumentSeparator", cmd_argument_separator, NULL, CMD_SCOPE_ANY, "character that will be used as separator when parsing application/x-www-form-urlencoded content." ), AP_INIT_TAKE1 ( "SecCookiev0Separator", cmd_cookiev0_separator, NULL, CMD_SCOPE_ANY, "character that will be used as separator when parsing cookie v0 content." ), AP_INIT_TAKE1 ( "SecAuditEngine", cmd_audit_engine, NULL, CMD_SCOPE_ANY, "On, Off or RelevantOnly to determine the level of audit logging" ), AP_INIT_TAKE1 ( "SecAuditLog", cmd_audit_log, NULL, CMD_SCOPE_ANY, "filename of the primary audit log file" ), AP_INIT_TAKE1 ( "SecAuditLog2", cmd_audit_log2, NULL, CMD_SCOPE_ANY, "filename of the secondary audit log file" ), AP_INIT_TAKE1 ( "SecAuditLogParts", cmd_audit_log_parts, NULL, CMD_SCOPE_ANY, "list of audit log parts that go into the log." ), AP_INIT_TAKE1 ( "SecAuditLogRelevantStatus", cmd_audit_log_relevant_status, NULL, CMD_SCOPE_ANY, "regular expression that will be used to determine if the response status is relevant for audit logging" ), AP_INIT_TAKE1 ( "SecAuditLogType", cmd_audit_log_type, NULL, CMD_SCOPE_ANY, "whether to use the old audit log format (Serial) or new (Concurrent)" ), AP_INIT_TAKE1 ( "SecAuditLogStorageDir", cmd_audit_log_storage_dir, NULL, CMD_SCOPE_ANY, "path to the audit log storage area; absolute, or relative to the root of the server" ), AP_INIT_TAKE1 ( "SecAuditLogDirMode", cmd_audit_log_dirmode, NULL, CMD_SCOPE_ANY, "octal permissions mode for concurrent audit log directories" ), AP_INIT_TAKE1 ( "SecAuditLogFileMode", cmd_audit_log_filemode, NULL, CMD_SCOPE_ANY, "octal permissions mode for concurrent audit log files" ), AP_INIT_TAKE12 ( "SecCacheTransformations", cmd_cache_transformations, NULL, CMD_SCOPE_ANY, "whether or not to cache transformations. Defaults to true." 
), AP_INIT_TAKE1 ( "SecChrootDir", cmd_chroot_dir, NULL, CMD_SCOPE_MAIN, "path of the directory to which server will be chrooted" ), AP_INIT_TAKE1 ( "SecComponentSignature", cmd_component_signature, NULL, CMD_SCOPE_MAIN, "component signature to add to ModSecurity signature." ), AP_INIT_FLAG ( "SecContentInjection", cmd_content_injection, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_FLAG ( "SecStreamOutBodyInspection", cmd_stream_outbody_inspection, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_FLAG ( "SecStreamInBodyInspection", cmd_stream_inbody_inspection, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_TAKE1 ( "SecCookieFormat", cmd_cookie_format, NULL, CMD_SCOPE_ANY, "version of the Cookie specification to use for parsing. Possible values are 0 and 1." ), AP_INIT_TAKE1 ( "SecDataDir", cmd_data_dir, NULL, CMD_SCOPE_MAIN, "path to the persistent data storage area" // TODO ), AP_INIT_TAKE1 ( "SecDebugLog", cmd_debug_log, NULL, CMD_SCOPE_ANY, "path to the debug log file" ), AP_INIT_TAKE1 ( "SecDebugLogLevel", cmd_debug_log_level, NULL, CMD_SCOPE_ANY, "debug log level, which controls the verbosity of logging." " Use values from 0 (no logging) to 9 (a *lot* of logging)." ), AP_INIT_TAKE1 ( "SecCollectionTimeout", cmd_collection_timeout, NULL, CMD_SCOPE_ANY, "set default collections timeout. default it 3600" ), AP_INIT_TAKE1 ( "SecDefaultAction", cmd_default_action, NULL, CMD_SCOPE_ANY, "default action list" ), AP_INIT_FLAG ( "SecDisableBackendCompression", cmd_disable_backend_compression, NULL, CMD_SCOPE_ANY, "When set to On, removes the compression headers from the backend requests." 
), AP_INIT_TAKE1 ( "SecGsbLookupDB", cmd_gsb_lookup_db, NULL, RSRC_CONF, "database google safe browsing" ), AP_INIT_TAKE1 ( "SecUnicodeCodePage", cmd_unicode_codepage, NULL, CMD_SCOPE_MAIN, "Unicode CodePage" ), AP_INIT_TAKE1 ( "SecUnicodeMapFile", cmd_unicode_map, NULL, CMD_SCOPE_MAIN, "Unicode Map file" ), AP_INIT_TAKE1 ( "SecGeoLookupDB", cmd_geo_lookup_db, NULL, RSRC_CONF, "database for geographical lookups module." ), AP_INIT_TAKE12 ( "SecGuardianLog", cmd_guardian_log, NULL, CMD_SCOPE_MAIN, "The filename of the filter debugging log file" ), AP_INIT_TAKE1 ( "SecMarker", cmd_marker, NULL, CMD_SCOPE_ANY, "marker for a skipAfter target" ), AP_INIT_TAKE1 ( "SecPcreMatchLimit", cmd_pcre_match_limit, NULL, CMD_SCOPE_MAIN, "PCRE match limit" ), AP_INIT_TAKE1 ( "SecPcreMatchLimitRecursion", cmd_pcre_match_limit_recursion, NULL, CMD_SCOPE_MAIN, "PCRE match limit recursion" ), AP_INIT_TAKE1 ( "SecRequestBodyAccess", cmd_request_body_access, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_TAKE1 ( "SecInterceptOnError", cmd_request_intercept_on_error, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_TAKE1 ( "SecRulePerfTime", cmd_rule_perf_time, NULL, CMD_SCOPE_ANY, "Threshold to log slow rules in usecs." ), AP_INIT_TAKE1 ( "SecReadStateLimit", cmd_conn_read_state_limit, NULL, CMD_SCOPE_ANY, "maximum number of threads in READ_BUSY state per ip address" ), AP_INIT_TAKE1 ( "SecWriteStateLimit", cmd_conn_write_state_limit, NULL, CMD_SCOPE_ANY, "maximum number of threads in WRITE_BUSY state per ip address" ), AP_INIT_TAKE1 ( "SecRequestBodyInMemoryLimit", cmd_request_body_inmemory_limit, NULL, CMD_SCOPE_ANY, "maximum request body size that will be placed in memory (except for POST urlencoded requests)." ), AP_INIT_TAKE1 ( "SecRequestBodyLimit", cmd_request_body_limit, NULL, CMD_SCOPE_ANY, "maximum request body size ModSecurity will accept." 
), AP_INIT_TAKE1 ( "SecRequestBodyNoFilesLimit", cmd_request_body_no_files_limit, NULL, CMD_SCOPE_ANY, "maximum request body size ModSecurity will accept, but excluding the size of uploaded files." ), AP_INIT_TAKE1 ( "SecRequestEncoding", cmd_request_encoding, NULL, CMD_SCOPE_ANY, "character encoding used in request." ), AP_INIT_TAKE1 ( "SecResponseBodyAccess", cmd_response_body_access, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_TAKE1 ( "SecResponseBodyLimit", cmd_response_body_limit, NULL, CMD_SCOPE_ANY, "byte limit for response body" ), AP_INIT_TAKE1 ( "SecResponseBodyLimitAction", cmd_response_body_limit_action, NULL, CMD_SCOPE_ANY, "what happens when the response body limit is reached" ), AP_INIT_TAKE1 ( "SecRequestBodyLimitAction", cmd_resquest_body_limit_action, NULL, CMD_SCOPE_ANY, "what happens when the request body limit is reached" ), AP_INIT_ITERATE ( "SecResponseBodyMimeType", cmd_response_body_mime_type, NULL, CMD_SCOPE_ANY, "adds given MIME types to the list of types that will be buffered on output" ), AP_INIT_NO_ARGS ( "SecResponseBodyMimeTypesClear", cmd_response_body_mime_types_clear, NULL, CMD_SCOPE_ANY, "clears the list of MIME types that will be buffered on output" ), #ifdef HTACCESS_CONFIG AP_INIT_TAKE23 ( "SecRule", cmd_rule, NULL, CMD_SCOPE_HTACCESS, "rule target, operator and optional action list" ), #else AP_INIT_TAKE23 ( "SecRule", cmd_rule, NULL, CMD_SCOPE_ANY, "rule target, operator and optional action list" ), #endif AP_INIT_TAKE1 ( "SecRuleEngine", cmd_rule_engine, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_FLAG ( "SecRuleInheritance", cmd_rule_inheritance, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_TAKE12 ( "SecRuleScript", cmd_rule_script, NULL, CMD_SCOPE_ANY, "rule script and optional actionlist" ), #ifdef HTACCESS_CONFIG AP_INIT_ITERATE ( "SecRuleRemoveById", cmd_rule_remove_by_id, NULL, CMD_SCOPE_HTACCESS, "rule ID for removal" ), AP_INIT_ITERATE ( "SecRuleRemoveByTag", cmd_rule_remove_by_tag, NULL, CMD_SCOPE_HTACCESS, "rule 
tag for removal" ), AP_INIT_ITERATE ( "SecRuleRemoveByMsg", cmd_rule_remove_by_msg, NULL, CMD_SCOPE_HTACCESS, "rule message for removal" ), #else AP_INIT_ITERATE ( "SecRuleRemoveById", cmd_rule_remove_by_id, NULL, CMD_SCOPE_ANY, "rule ID for removal" ), AP_INIT_ITERATE ( "SecRuleRemoveByTag", cmd_rule_remove_by_tag, NULL, CMD_SCOPE_ANY, "rule tag for removal" ), AP_INIT_ITERATE ( "SecRuleRemoveByMsg", cmd_rule_remove_by_msg, NULL, CMD_SCOPE_ANY, "rule message for removal" ), #endif AP_INIT_TAKE2 ( "SecHashMethodPm", cmd_hash_method_pm, NULL, CMD_SCOPE_ANY, "Hash method and pattern" ), AP_INIT_TAKE2 ( "SecHashMethodRx", cmd_hash_method_rx, NULL, CMD_SCOPE_ANY, "Hash method and regex" ), #ifdef HTACCESS_CONFIG AP_INIT_TAKE2 ( "SecRuleUpdateActionById", cmd_rule_update_action_by_id, NULL, CMD_SCOPE_HTACCESS, "updated action list" ), AP_INIT_TAKE23 ( "SecRuleUpdateTargetById", cmd_rule_update_target_by_id, NULL, CMD_SCOPE_HTACCESS, "updated target list" ), AP_INIT_TAKE23 ( "SecRuleUpdateTargetByTag", cmd_rule_update_target_by_tag, NULL, CMD_SCOPE_HTACCESS, "rule tag pattern and updated target list" ), AP_INIT_TAKE23 ( "SecRuleUpdateTargetByMsg", cmd_rule_update_target_by_msg, NULL, CMD_SCOPE_HTACCESS, "rule message pattern and updated target list" ), #else AP_INIT_TAKE2 ( "SecRuleUpdateActionById", cmd_rule_update_action_by_id, NULL, CMD_SCOPE_ANY, "updated action list" ), AP_INIT_TAKE23 ( "SecRuleUpdateTargetById", cmd_rule_update_target_by_id, NULL, CMD_SCOPE_ANY, "updated target list" ), AP_INIT_TAKE23 ( "SecRuleUpdateTargetByTag", cmd_rule_update_target_by_tag, NULL, CMD_SCOPE_ANY, "rule tag pattern and updated target list" ), AP_INIT_TAKE23 ( "SecRuleUpdateTargetByMsg", cmd_rule_update_target_by_msg, NULL, CMD_SCOPE_ANY, "rule message pattern and updated target list" ), #endif AP_INIT_TAKE1 ( "SecServerSignature", cmd_server_signature, NULL, CMD_SCOPE_MAIN, "the new signature of the server" ), AP_INIT_TAKE1 ( "SecTmpDir", cmd_tmp_dir, NULL, CMD_SCOPE_ANY, "path to 
the temporary storage area" ), AP_INIT_TAKE1 ( "SecUploadDir", cmd_upload_dir, NULL, CMD_SCOPE_ANY, "path to the file upload area" ), AP_INIT_TAKE1 ( "SecUploadFileLimit", cmd_upload_file_limit, NULL, CMD_SCOPE_ANY, "limit the number of uploaded files processed" ), AP_INIT_TAKE1 ( "SecUploadFileMode", cmd_upload_filemode, NULL, CMD_SCOPE_ANY, "octal permissions mode for uploaded files" ), AP_INIT_TAKE1 ( "SecUploadKeepFiles", cmd_upload_keep_files, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_TAKE1 ( "SecWebAppId", cmd_web_app_id, NULL, CMD_SCOPE_ANY, "id" ), AP_INIT_TAKE1 ( "SecSensorId", cmd_sensor_id, NULL, CMD_SCOPE_MAIN, "sensor id" ), AP_INIT_TAKE1 ( "SecHttpBlKey", cmd_httpBl_key, NULL, CMD_SCOPE_ANY, "httpBl access key" ), AP_INIT_TAKE1 ( "SecHashEngine", cmd_hash_engine, NULL, CMD_SCOPE_ANY, "On or Off" ), AP_INIT_TAKE2 ( "SecHashKey", cmd_hash_key, NULL, CMD_SCOPE_ANY, "Set Hash key" ), AP_INIT_TAKE1 ( "SecHashParam", cmd_hash_param, NULL, CMD_SCOPE_ANY, "Set Hash parameter" ), { NULL } };
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5608_0
crossvul-cpp_data_good_721_0
/* Copyright (C) 2007-2013 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free * Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * version 2 along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ /** * \ingroup decode * * @{ */ /** * \file * * \author Victor Julien <victor@inliniac.net> * * Decode IPv6 */ #include "suricata-common.h" #include "packet-queue.h" #include "decode.h" #include "decode-ipv6.h" #include "decode-icmpv6.h" #include "decode-events.h" #include "defrag.h" #include "pkt-var.h" #include "util-debug.h" #include "util-print.h" #include "util-unittest.h" #include "util-profiling.h" #include "host.h" #define IPV6_EXTHDRS ip6eh.ip6_exthdrs #define IPV6_EH_CNT ip6eh.ip6_exthdrs_cnt /** * \brief Function to decode IPv4 in IPv6 packets * */ static void DecodeIPv4inIPv6(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, uint8_t *pkt, uint16_t plen, PacketQueue *pq) { if (unlikely(plen < IPV4_HEADER_LEN)) { ENGINE_SET_INVALID_EVENT(p, IPV4_IN_IPV6_PKT_TOO_SMALL); return; } if (IP_GET_RAW_VER(pkt) == 4) { if (pq != NULL) { Packet *tp = PacketTunnelPktSetup(tv, dtv, p, pkt, plen, DECODE_TUNNEL_IPV4, pq); if (tp != NULL) { PKT_SET_SRC(tp, PKT_SRC_DECODER_IPV6); /* add the tp to the packet queue. 
*/ PacketEnqueue(pq,tp); StatsIncr(tv, dtv->counter_ipv4inipv6); return; } } } else { ENGINE_SET_EVENT(p, IPV4_IN_IPV6_WRONG_IP_VER); } return; } /** * \brief Function to decode IPv6 in IPv6 packets * */ static int DecodeIP6inIP6(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, uint8_t *pkt, uint16_t plen, PacketQueue *pq) { if (unlikely(plen < IPV6_HEADER_LEN)) { ENGINE_SET_INVALID_EVENT(p, IPV6_IN_IPV6_PKT_TOO_SMALL); return TM_ECODE_FAILED; } if (IP_GET_RAW_VER(pkt) == 6) { if (unlikely(pq != NULL)) { Packet *tp = PacketTunnelPktSetup(tv, dtv, p, pkt, plen, DECODE_TUNNEL_IPV6, pq); if (tp != NULL) { PKT_SET_SRC(tp, PKT_SRC_DECODER_IPV6); PacketEnqueue(pq,tp); StatsIncr(tv, dtv->counter_ipv6inipv6); } } } else { ENGINE_SET_EVENT(p, IPV6_IN_IPV6_WRONG_IP_VER); } return TM_ECODE_OK; } #ifndef UNITTESTS // ugly, but we need this in defrag tests static inline #endif void DecodeIPV6FragHeader(Packet *p, uint8_t *pkt, uint16_t hdrextlen, uint16_t plen, uint16_t prev_hdrextlen) { uint16_t frag_offset = (*(pkt + 2) << 8 | *(pkt + 3)) & 0xFFF8; int frag_morefrags = (*(pkt + 2) << 8 | *(pkt + 3)) & 0x0001; p->ip6eh.fh_offset = frag_offset; p->ip6eh.fh_more_frags_set = frag_morefrags ? TRUE : FALSE; p->ip6eh.fh_nh = *pkt; uint32_t fh_id; memcpy(&fh_id, pkt+4, 4); p->ip6eh.fh_id = SCNtohl(fh_id); SCLogDebug("IPV6 FH: offset %u, mf %s, nh %u, id %u/%x", p->ip6eh.fh_offset, p->ip6eh.fh_more_frags_set ? 
"true" : "false", p->ip6eh.fh_nh, p->ip6eh.fh_id, p->ip6eh.fh_id); // store header offset, data offset uint16_t frag_hdr_offset = (uint16_t)(pkt - GET_PKT_DATA(p)); uint16_t data_offset = (uint16_t)(frag_hdr_offset + hdrextlen); uint16_t data_len = plen - hdrextlen; p->ip6eh.fh_header_offset = frag_hdr_offset; p->ip6eh.fh_data_offset = data_offset; p->ip6eh.fh_data_len = data_len; /* if we have a prev hdr, store the type and offset of it */ if (prev_hdrextlen) { p->ip6eh.fh_prev_hdr_offset = frag_hdr_offset - prev_hdrextlen; } SCLogDebug("IPV6 FH: frag_hdr_offset %u, data_offset %u, data_len %u", p->ip6eh.fh_header_offset, p->ip6eh.fh_data_offset, p->ip6eh.fh_data_len); } static void DecodeIPV6ExtHdrs(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, uint8_t *pkt, uint16_t len, PacketQueue *pq) { SCEnter(); uint8_t *orig_pkt = pkt; uint8_t nh = IPV6_GET_NH(p); /* careful, 0 is actually a real type */ uint16_t hdrextlen = 0; uint16_t plen = len; char dstopts = 0; char exthdr_fh_done = 0; int hh = 0; int rh = 0; int eh = 0; int ah = 0; while(1) { if (nh == IPPROTO_NONE) { if (plen > 0) { /* No upper layer, but we do have data. Suspicious. 
*/ ENGINE_SET_EVENT(p, IPV6_DATA_AFTER_NONE_HEADER); } SCReturn; } if (plen < 2) { /* minimal needed in a hdr */ ENGINE_SET_INVALID_EVENT(p, IPV6_TRUNC_EXTHDR); SCReturn; } switch(nh) { case IPPROTO_TCP: IPV6_SET_L4PROTO(p,nh); DecodeTCP(tv, dtv, p, pkt, plen, pq); SCReturn; case IPPROTO_UDP: IPV6_SET_L4PROTO(p,nh); DecodeUDP(tv, dtv, p, pkt, plen, pq); SCReturn; case IPPROTO_ICMPV6: IPV6_SET_L4PROTO(p,nh); DecodeICMPV6(tv, dtv, p, pkt, plen, pq); SCReturn; case IPPROTO_SCTP: IPV6_SET_L4PROTO(p,nh); DecodeSCTP(tv, dtv, p, pkt, plen, pq); SCReturn; case IPPROTO_ROUTING: IPV6_SET_L4PROTO(p,nh); hdrextlen = 8 + (*(pkt+1) * 8); /* 8 bytes + length in 8 octet units */ SCLogDebug("hdrextlen %"PRIu8, hdrextlen); if (hdrextlen > plen) { ENGINE_SET_INVALID_EVENT(p, IPV6_TRUNC_EXTHDR); SCReturn; } if (rh) { ENGINE_SET_EVENT(p, IPV6_EXTHDR_DUPL_RH); /* skip past this extension so we can continue parsing the rest * of the packet */ nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; } rh = 1; IPV6_EXTHDR_SET_RH(p); uint8_t ip6rh_type = *(pkt + 2); if (ip6rh_type == 0) { ENGINE_SET_EVENT(p, IPV6_EXTHDR_RH_TYPE_0); } p->ip6eh.rh_type = ip6rh_type; nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: { IPV6OptHAO hao_s, *hao = &hao_s; IPV6OptRA ra_s, *ra = &ra_s; IPV6OptJumbo jumbo_s, *jumbo = &jumbo_s; uint16_t optslen = 0; IPV6_SET_L4PROTO(p,nh); hdrextlen = (*(pkt+1) + 1) << 3; if (hdrextlen > plen) { ENGINE_SET_INVALID_EVENT(p, IPV6_TRUNC_EXTHDR); SCReturn; } uint8_t *ptr = pkt + 2; /* +2 to go past nxthdr and len */ /* point the pointers to right structures * in Packet. 
*/ if (nh == IPPROTO_HOPOPTS) { if (hh) { ENGINE_SET_EVENT(p, IPV6_EXTHDR_DUPL_HH); /* skip past this extension so we can continue parsing the rest * of the packet */ nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; } hh = 1; optslen = ((*(pkt + 1) + 1 ) << 3) - 2; } else if (nh == IPPROTO_DSTOPTS) { if (dstopts == 0) { optslen = ((*(pkt + 1) + 1 ) << 3) - 2; dstopts = 1; } else if (dstopts == 1) { optslen = ((*(pkt + 1) + 1 ) << 3) - 2; dstopts = 2; } else { ENGINE_SET_EVENT(p, IPV6_EXTHDR_DUPL_DH); /* skip past this extension so we can continue parsing the rest * of the packet */ nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; } } if (optslen > plen) { /* since the packet is long enough (we checked * plen against hdrlen, the optlen must be malformed. */ ENGINE_SET_INVALID_EVENT(p, IPV6_EXTHDR_INVALID_OPTLEN); /* skip past this extension so we can continue parsing the rest * of the packet */ nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; } /** \todo move into own function to loaded on demand */ uint16_t padn_cnt = 0; uint16_t other_cnt = 0; uint16_t offset = 0; while(offset < optslen) { if (*ptr == IPV6OPT_PAD1) { padn_cnt++; offset++; ptr++; continue; } if (offset + 1 >= optslen) { ENGINE_SET_INVALID_EVENT(p, IPV6_EXTHDR_INVALID_OPTLEN); break; } /* length field for each opt */ uint8_t ip6_optlen = *(ptr + 1); /* see if the optlen from the packet fits the total optslen */ if ((offset + 1 + ip6_optlen) > optslen) { ENGINE_SET_INVALID_EVENT(p, IPV6_EXTHDR_INVALID_OPTLEN); break; } if (*ptr == IPV6OPT_PADN) /* PadN */ { //printf("PadN option\n"); padn_cnt++; /* a zero padN len would be weird */ if (ip6_optlen == 0) ENGINE_SET_EVENT(p, IPV6_EXTHDR_ZERO_LEN_PADN); } else if (*ptr == IPV6OPT_RA) /* RA */ { ra->ip6ra_type = *(ptr); ra->ip6ra_len = ip6_optlen; if (ip6_optlen < sizeof(ra->ip6ra_value)) { ENGINE_SET_INVALID_EVENT(p, IPV6_EXTHDR_INVALID_OPTLEN); break; } memcpy(&ra->ip6ra_value, (ptr + 2), sizeof(ra->ip6ra_value)); ra->ip6ra_value = 
SCNtohs(ra->ip6ra_value); //printf("RA option: type %" PRIu32 " len %" PRIu32 " value %" PRIu32 "\n", // ra->ip6ra_type, ra->ip6ra_len, ra->ip6ra_value); other_cnt++; } else if (*ptr == IPV6OPT_JUMBO) /* Jumbo */ { jumbo->ip6j_type = *(ptr); jumbo->ip6j_len = ip6_optlen; if (ip6_optlen < sizeof(jumbo->ip6j_payload_len)) { ENGINE_SET_INVALID_EVENT(p, IPV6_EXTHDR_INVALID_OPTLEN); break; } memcpy(&jumbo->ip6j_payload_len, (ptr+2), sizeof(jumbo->ip6j_payload_len)); jumbo->ip6j_payload_len = SCNtohl(jumbo->ip6j_payload_len); //printf("Jumbo option: type %" PRIu32 " len %" PRIu32 " payload len %" PRIu32 "\n", // jumbo->ip6j_type, jumbo->ip6j_len, jumbo->ip6j_payload_len); } else if (*ptr == IPV6OPT_HAO) /* HAO */ { hao->ip6hao_type = *(ptr); hao->ip6hao_len = ip6_optlen; if (ip6_optlen < sizeof(hao->ip6hao_hoa)) { ENGINE_SET_INVALID_EVENT(p, IPV6_EXTHDR_INVALID_OPTLEN); break; } memcpy(&hao->ip6hao_hoa, (ptr+2), sizeof(hao->ip6hao_hoa)); //printf("HAO option: type %" PRIu32 " len %" PRIu32 " ", // hao->ip6hao_type, hao->ip6hao_len); //char addr_buf[46]; //PrintInet(AF_INET6, (char *)&(hao->ip6hao_hoa), // addr_buf,sizeof(addr_buf)); //printf("home addr %s\n", addr_buf); other_cnt++; } else { if (nh == IPPROTO_HOPOPTS) ENGINE_SET_EVENT(p, IPV6_HOPOPTS_UNKNOWN_OPT); else ENGINE_SET_EVENT(p, IPV6_DSTOPTS_UNKNOWN_OPT); other_cnt++; } uint16_t optlen = (*(ptr + 1) + 2); ptr += optlen; /* +2 for opt type and opt len fields */ offset += optlen; } /* flag packets that have only padding */ if (padn_cnt > 0 && other_cnt == 0) { if (nh == IPPROTO_HOPOPTS) ENGINE_SET_EVENT(p, IPV6_HOPOPTS_ONLY_PADDING); else ENGINE_SET_EVENT(p, IPV6_DSTOPTS_ONLY_PADDING); } nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; } case IPPROTO_FRAGMENT: { IPV6_SET_L4PROTO(p,nh); /* store the offset of this extension into the packet * past the ipv6 header. 
We use it in defrag for creating * a defragmented packet without the frag header */ if (exthdr_fh_done == 0) { p->ip6eh.fh_offset = pkt - orig_pkt; exthdr_fh_done = 1; } uint16_t prev_hdrextlen = hdrextlen; hdrextlen = sizeof(IPV6FragHdr); if (hdrextlen > plen) { ENGINE_SET_INVALID_EVENT(p, IPV6_TRUNC_EXTHDR); SCReturn; } /* for the frag header, the length field is reserved */ if (*(pkt + 1) != 0) { ENGINE_SET_EVENT(p, IPV6_FH_NON_ZERO_RES_FIELD); /* non fatal, lets try to continue */ } if (IPV6_EXTHDR_ISSET_FH(p)) { ENGINE_SET_EVENT(p, IPV6_EXTHDR_DUPL_FH); nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; } /* set the header flag first */ IPV6_EXTHDR_SET_FH(p); /* parse the header and setup the vars */ DecodeIPV6FragHeader(p, pkt, hdrextlen, plen, prev_hdrextlen); /* if FH has offset 0 and no more fragments are coming, we * parse this packet further right away, no defrag will be * needed. It is a useless FH then though, so we do set an * decoder event. */ if (p->ip6eh.fh_more_frags_set == 0 && p->ip6eh.fh_offset == 0) { ENGINE_SET_EVENT(p, IPV6_EXTHDR_USELESS_FH); nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; } /* the rest is parsed upon reassembly */ p->flags |= PKT_IS_FRAGMENT; SCReturn; } case IPPROTO_ESP: { IPV6_SET_L4PROTO(p,nh); hdrextlen = sizeof(IPV6EspHdr); if (hdrextlen > plen) { ENGINE_SET_INVALID_EVENT(p, IPV6_TRUNC_EXTHDR); SCReturn; } if (eh) { ENGINE_SET_EVENT(p, IPV6_EXTHDR_DUPL_EH); SCReturn; } eh = 1; nh = IPPROTO_NONE; pkt += hdrextlen; plen -= hdrextlen; break; } case IPPROTO_AH: { IPV6_SET_L4PROTO(p,nh); /* we need the header as a minimum */ hdrextlen = sizeof(IPV6AuthHdr); /* the payload len field is the number of extra 4 byte fields, * IPV6AuthHdr already contains the first */ if (*(pkt+1) > 0) hdrextlen += ((*(pkt+1) - 1) * 4); SCLogDebug("hdrextlen %"PRIu8, hdrextlen); if (hdrextlen > plen) { ENGINE_SET_INVALID_EVENT(p, IPV6_TRUNC_EXTHDR); SCReturn; } IPV6AuthHdr *ahhdr = (IPV6AuthHdr *)pkt; if (ahhdr->ip6ah_reserved != 
0x0000) { ENGINE_SET_EVENT(p, IPV6_EXTHDR_AH_RES_NOT_NULL); } if (ah) { ENGINE_SET_EVENT(p, IPV6_EXTHDR_DUPL_AH); nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; } ah = 1; nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; } case IPPROTO_IPIP: IPV6_SET_L4PROTO(p,nh); DecodeIPv4inIPv6(tv, dtv, p, pkt, plen, pq); SCReturn; /* none, last header */ case IPPROTO_NONE: IPV6_SET_L4PROTO(p,nh); SCReturn; case IPPROTO_ICMP: ENGINE_SET_EVENT(p,IPV6_WITH_ICMPV4); SCReturn; /* no parsing yet, just skip it */ case IPPROTO_MH: case IPPROTO_HIP: case IPPROTO_SHIM6: hdrextlen = 8 + (*(pkt+1) * 8); /* 8 bytes + length in 8 octet units */ if (hdrextlen > plen) { ENGINE_SET_INVALID_EVENT(p, IPV6_TRUNC_EXTHDR); SCReturn; } nh = *pkt; pkt += hdrextlen; plen -= hdrextlen; break; default: ENGINE_SET_EVENT(p, IPV6_UNKNOWN_NEXT_HEADER); IPV6_SET_L4PROTO(p,nh); SCReturn; } } SCReturn; } static int DecodeIPV6Packet (ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, uint8_t *pkt, uint16_t len) { if (unlikely(len < IPV6_HEADER_LEN)) { return -1; } if (unlikely(IP_GET_RAW_VER(pkt) != 6)) { SCLogDebug("wrong ip version %" PRIu8 "",IP_GET_RAW_VER(pkt)); ENGINE_SET_INVALID_EVENT(p, IPV6_WRONG_IP_VER); return -1; } p->ip6h = (IPV6Hdr *)pkt; if (unlikely(len < (IPV6_HEADER_LEN + IPV6_GET_PLEN(p)))) { ENGINE_SET_INVALID_EVENT(p, IPV6_TRUNC_PKT); return -1; } SET_IPV6_SRC_ADDR(p,&p->src); SET_IPV6_DST_ADDR(p,&p->dst); return 0; } int DecodeIPV6(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p, uint8_t *pkt, uint16_t len, PacketQueue *pq) { int ret; StatsIncr(tv, dtv->counter_ipv6); /* do the actual decoding */ ret = DecodeIPV6Packet (tv, dtv, p, pkt, len); if (unlikely(ret < 0)) { p->ip6h = NULL; return TM_ECODE_FAILED; } #ifdef DEBUG if (SCLogDebugEnabled()) { /* only convert the addresses if debug is really enabled */ /* debug print */ char s[46], d[46]; PrintInet(AF_INET6, (const void *)GET_IPV6_SRC_ADDR(p), s, sizeof(s)); PrintInet(AF_INET6, (const void *)GET_IPV6_DST_ADDR(p), d, 
sizeof(d)); SCLogDebug("IPV6 %s->%s - CLASS: %" PRIu32 " FLOW: %" PRIu32 " NH: %" PRIu32 " PLEN: %" PRIu32 " HLIM: %" PRIu32 "", s,d, IPV6_GET_CLASS(p), IPV6_GET_FLOW(p), IPV6_GET_NH(p), IPV6_GET_PLEN(p), IPV6_GET_HLIM(p)); } #endif /* DEBUG */ /* now process the Ext headers and/or the L4 Layer */ switch(IPV6_GET_NH(p)) { case IPPROTO_TCP: IPV6_SET_L4PROTO (p, IPPROTO_TCP); DecodeTCP(tv, dtv, p, pkt + IPV6_HEADER_LEN, IPV6_GET_PLEN(p), pq); return TM_ECODE_OK; case IPPROTO_UDP: IPV6_SET_L4PROTO (p, IPPROTO_UDP); DecodeUDP(tv, dtv, p, pkt + IPV6_HEADER_LEN, IPV6_GET_PLEN(p), pq); return TM_ECODE_OK; case IPPROTO_ICMPV6: IPV6_SET_L4PROTO (p, IPPROTO_ICMPV6); DecodeICMPV6(tv, dtv, p, pkt + IPV6_HEADER_LEN, IPV6_GET_PLEN(p), pq); return TM_ECODE_OK; case IPPROTO_SCTP: IPV6_SET_L4PROTO (p, IPPROTO_SCTP); DecodeSCTP(tv, dtv, p, pkt + IPV6_HEADER_LEN, IPV6_GET_PLEN(p), pq); return TM_ECODE_OK; case IPPROTO_IPIP: IPV6_SET_L4PROTO(p, IPPROTO_IPIP); DecodeIPv4inIPv6(tv, dtv, p, pkt + IPV6_HEADER_LEN, IPV6_GET_PLEN(p), pq); return TM_ECODE_OK; case IPPROTO_IPV6: DecodeIP6inIP6(tv, dtv, p, pkt + IPV6_HEADER_LEN, IPV6_GET_PLEN(p), pq); return TM_ECODE_OK; case IPPROTO_FRAGMENT: case IPPROTO_HOPOPTS: case IPPROTO_ROUTING: case IPPROTO_NONE: case IPPROTO_DSTOPTS: case IPPROTO_AH: case IPPROTO_ESP: case IPPROTO_MH: case IPPROTO_HIP: case IPPROTO_SHIM6: DecodeIPV6ExtHdrs(tv, dtv, p, pkt + IPV6_HEADER_LEN, IPV6_GET_PLEN(p), pq); break; case IPPROTO_ICMP: ENGINE_SET_EVENT(p,IPV6_WITH_ICMPV4); break; default: ENGINE_SET_EVENT(p, IPV6_UNKNOWN_NEXT_HEADER); IPV6_SET_L4PROTO (p, IPV6_GET_NH(p)); break; } p->proto = IPV6_GET_L4PROTO (p); /* Pass to defragger if a fragment. 
*/ if (IPV6_EXTHDR_ISSET_FH(p)) { Packet *rp = Defrag(tv, dtv, p, pq); if (rp != NULL) { PacketEnqueue(pq,rp); } } return TM_ECODE_OK; } #ifdef UNITTESTS /** * \test fragment decoding */ static int DecodeIPV6FragTest01 (void) { uint8_t raw_frag1[] = { 0x60, 0x0f, 0x1a, 0xcf, 0x05, 0xa8, 0x2c, 0x36, 0x20, 0x01, 0x04, 0x70, 0x00, 0x01, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x20, 0x01, 0x09, 0x80, 0x32, 0xb2, 0x00, 0x01, 0x2e, 0x41, 0x38, 0xff, 0xfe, 0xa7, 0xea, 0xeb, 0x06, 0x00, 0x00, 0x01, 0xdf, 0xf8, 0x11, 0xd7, 0x00, 0x50, 0xa6, 0x5c, 0xcc, 0xd7, 0x28, 0x9f, 0xc3, 0x34, 0xc6, 0x58, 0x80, 0x10, 0x20, 0x13, 0x18, 0x1f, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0xcd, 0xf9, 0x3a, 0x41, 0x00, 0x1a, 0x91, 0x8a, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, 0x2e, 0x31, 0x20, 0x32, 0x30, 0x30, 0x20, 0x4f, 0x4b, 0x0d, 0x0a, 0x44, 0x61, 0x74, 0x65, 0x3a, 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x30, 0x32, 0x20, 0x44, 0x65, 0x63, 0x20, 0x32, 0x30, 0x31, 0x31, 0x20, 0x30, 0x38, 0x3a, 0x33, 0x32, 0x3a, 0x35, 0x37, 0x20, 0x47, 0x4d, 0x54, 0x0d, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x3a, 0x20, 0x41, 0x70, 0x61, 0x63, 0x68, 0x65, 0x0d, 0x0a, 0x43, 0x61, 0x63, 0x68, 0x65, 0x2d, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x3a, 0x20, 0x6e, 0x6f, 0x2d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x0d, 0x0a, 0x50, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x3a, 0x20, 0x6e, 0x6f, 0x2d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x0d, 0x0a, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x3a, 0x20, 0x54, 0x68, 0x75, 0x2c, 0x20, 0x30, 0x31, 0x20, 0x4a, 0x61, 0x6e, 0x20, 0x31, 0x39, 0x37, 0x31, 0x20, 0x30, 0x30, 0x3a, 0x30, 0x30, 0x3a, 0x30, 0x30, 0x20, 0x47, 0x4d, 0x54, 0x0d, 0x0a, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x3a, 0x20, 0x31, 0x35, 0x39, 0x39, 0x0d, 0x0a, 0x4b, 0x65, 0x65, 0x70, 0x2d, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x3a, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x3d, 0x35, 0x2c, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x39, 0x39, 0x0d, 0x0a, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x20, 0x4b, 0x65, 0x65, 0x70, 0x2d, 0x41, 0x6c, 0x69, 0x76, 0x65, 0x0d, 0x0a, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x20, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x3b, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x61, 0x73, 0x63, 0x69, 0x69, 0x0d, 0x0a, 0x0d, 0x0a, 0x5f, 0x6a, 0x71, 0x6a, 0x73, 0x70, 0x28, 0x7b, 0x22, 0x69, 0x70, 0x22, 0x3a, 0x22, 0x32, 0x30, 0x30, 0x31, 0x3a, 0x39, 0x38, 0x30, 0x3a, 0x33, 0x32, 0x62, 0x32, 0x3a, 0x31, 0x3a, 0x32, 0x65, 0x34, 0x31, 0x3a, 0x33, 0x38, 0x66, 0x66, 0x3a, 0x66, 0x65, 0x61, 0x37, 0x3a, 0x65, 0x61, 0x65, 0x62, 0x22, 0x2c, 0x22, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3a, 0x22, 0x69, 0x70, 0x76, 0x36, 0x22, 0x2c, 0x22, 0x73, 0x75, 0x62, 0x74, 0x79, 0x70, 0x65, 0x22, 0x3a, 0x22, 0x22, 0x2c, 0x22, 0x76, 0x69, 0x61, 0x22, 0x3a, 0x22, 0x22, 0x2c, 0x22, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x22, 0x3a, 0x22, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, }; uint8_t raw_frag2[] = { 0x60, 0x0f, 0x1a, 0xcf, 0x00, 0x1c, 0x2c, 0x36, 0x20, 0x01, 0x04, 0x70, 0x00, 0x01, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x20, 0x01, 0x09, 0x80, 0x32, 0xb2, 0x00, 0x01, 0x2e, 0x41, 0x38, 0xff, 0xfe, 0xa7, 0xea, 0xeb, 0x06, 0x00, 0x05, 0xa0, 0xdf, 0xf8, 0x11, 0xd7, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, }; Packet *pkt; Packet *p1 = PacketGetFromAlloc(); if (unlikely(p1 == NULL)) return 0; Packet *p2 = PacketGetFromAlloc(); if (unlikely(p2 == NULL)) { SCFree(p1); return 0; } ThreadVars tv; DecodeThreadVars dtv; int result = 0; PacketQueue pq; FlowInitConfig(FLOW_QUIET); DefragInit(); memset(&pq, 0, sizeof(PacketQueue)); memset(&tv, 0, sizeof(ThreadVars)); memset(&dtv, 0, 
sizeof(DecodeThreadVars)); PacketCopyData(p1, raw_frag1, sizeof(raw_frag1)); PacketCopyData(p2, raw_frag2, sizeof(raw_frag2)); DecodeIPV6(&tv, &dtv, p1, GET_PKT_DATA(p1), GET_PKT_LEN(p1), &pq); if (!(IPV6_EXTHDR_ISSET_FH(p1))) { printf("ipv6 frag header not detected: "); goto end; } DecodeIPV6(&tv, &dtv, p2, GET_PKT_DATA(p2), GET_PKT_LEN(p2), &pq); if (!(IPV6_EXTHDR_ISSET_FH(p2))) { printf("ipv6 frag header not detected: "); goto end; } if (pq.len != 1) { printf("no reassembled packet: "); goto end; } result = 1; end: PACKET_RECYCLE(p1); PACKET_RECYCLE(p2); SCFree(p1); SCFree(p2); pkt = PacketDequeue(&pq); while (pkt != NULL) { PACKET_RECYCLE(pkt); SCFree(pkt); pkt = PacketDequeue(&pq); } DefragDestroy(); FlowShutdown(); return result; } /** * \test routing header decode */ static int DecodeIPV6RouteTest01 (void) { uint8_t raw_pkt1[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x2b, 0x40, 0x20, 0x01, 0xaa, 0xaa, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x20, 0x01, 0xaa, 0xaa, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0xed, 0x00, 0x50, 0x1b, 0xc7, 0x6a, 0xdf, 0x00, 0x00, 0x00, 0x00, 0x50, 0x02, 0x20, 0x00, 0xfa, 0x87, 0x00, 0x00, }; Packet *p1 = PacketGetFromAlloc(); FAIL_IF(unlikely(p1 == NULL)); ThreadVars tv; DecodeThreadVars dtv; PacketQueue pq; FlowInitConfig(FLOW_QUIET); memset(&pq, 0, sizeof(PacketQueue)); memset(&tv, 0, sizeof(ThreadVars)); memset(&dtv, 0, sizeof(DecodeThreadVars)); PacketCopyData(p1, raw_pkt1, sizeof(raw_pkt1)); DecodeIPV6(&tv, &dtv, p1, GET_PKT_DATA(p1), GET_PKT_LEN(p1), &pq); FAIL_IF (!(IPV6_EXTHDR_ISSET_RH(p1))); FAIL_IF (p1->ip6eh.rh_type != 0); PACKET_RECYCLE(p1); SCFree(p1); FlowShutdown(); PASS; } /** * \test HOP header decode */ static int DecodeIPV6HopTest01 (void) { uint8_t raw_pkt1[] = { 0x60,0x00,0x00,0x00,0x00,0x20,0x00,0x01,0xfe,0x80,0x00,0x00,0x00,0x00,0x00,0x00, 
0x02,0x0f,0xfe,0xff,0xfe,0x98,0x3d,0x01,0xff,0x02,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x3a,0x00,0xff, /* 0xff is a nonsene opt */ 0x02,0x00,0x00,0x00,0x00, 0x82,0x00,0x1c,0x6f,0x27,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 }; Packet *p1 = PacketGetFromAlloc(); FAIL_IF(unlikely(p1 == NULL)); ThreadVars tv; DecodeThreadVars dtv; PacketQueue pq; FlowInitConfig(FLOW_QUIET); memset(&pq, 0, sizeof(PacketQueue)); memset(&tv, 0, sizeof(ThreadVars)); memset(&dtv, 0, sizeof(DecodeThreadVars)); PacketCopyData(p1, raw_pkt1, sizeof(raw_pkt1)); DecodeIPV6(&tv, &dtv, p1, GET_PKT_DATA(p1), GET_PKT_LEN(p1), &pq); FAIL_IF (!(ENGINE_ISSET_EVENT(p1, IPV6_HOPOPTS_UNKNOWN_OPT))); PACKET_RECYCLE(p1); SCFree(p1); FlowShutdown(); PASS; } #endif /* UNITTESTS */ /** * \brief this function registers unit tests for IPV6 decoder */ void DecodeIPV6RegisterTests(void) { #ifdef UNITTESTS UtRegisterTest("DecodeIPV6FragTest01", DecodeIPV6FragTest01); UtRegisterTest("DecodeIPV6RouteTest01", DecodeIPV6RouteTest01); UtRegisterTest("DecodeIPV6HopTest01", DecodeIPV6HopTest01); #endif /* UNITTESTS */ } /** * @} */
./CrossVul/dataset_final_sorted/CWE-20/c/good_721_0
crossvul-cpp_data_good_907_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % CCC U U TTTTT % % C U U T % % C U U T % % C U U T % % CCC UUU T % % % % % % Read DR Halo Image Format % % % % Software Design % % Jaroslav Fojtik % % June 2000 % % % % % % Permission is hereby granted, free of charge, to any person obtaining a % % copy of this software and associated documentation files ("ImageMagick"), % % to deal in ImageMagick without restriction, including without limitation % % the rights to use, copy, modify, merge, publish, distribute, sublicense, % % and/or sell copies of ImageMagick, and to permit persons to whom the % % ImageMagick is furnished to do so, subject to the following conditions: % % % % The above copyright notice and this permission notice shall be included in % % all copies or substantial portions of ImageMagick. % % % % The software is provided "as is", without warranty of any kind, express or % % implied, including but not limited to the warranties of merchantability, % % fitness for a particular purpose and noninfringement. In no event shall % % ImageMagick Studio be liable for any claim, damages or other liability, % % whether in an action of contract, tort or otherwise, arising from, out of % % or in connection with ImageMagick or the use or other dealings in % % ImageMagick. % % % % Except as contained in this notice, the name of the ImageMagick Studio % % shall not be used in advertising or otherwise to promote the sale, use or % % other dealings in ImageMagick without prior written authorization from the % % ImageMagick Studio. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/pixel-accessor.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/module.h"
#include "magick/utility.h"
#include "magick/utility-private.h"

/* On-disk CUT raster header: three 16-bit LSB words (Reserved must be 0). */
typedef struct
{
  unsigned Width;
  unsigned Height;
  unsigned Reserved;
} CUTHeader;

/* Companion .PAL palette-file header (Dr. Halo "AH" palette format). */
typedef struct
{
  char FileId[2];
  unsigned Version;
  unsigned Size;
  char FileType;
  char SubType;
  unsigned BoardID;
  unsigned GraphicsMode;
  unsigned MaxIndex;
  unsigned MaxRed;
  unsigned MaxGreen;
  unsigned MaxBlue;
  char PaletteId[20];
} CUTPalHeader;

/*
  InsertRow() unpacks one decompressed scanline buffer `p` into row `y` of
  `image`, interpreting the bytes at the given bit depth `bpp`
  (1/2/4/8 = colormapped, 24 = direct RGB).  Returns MagickTrue on success,
  MagickFalse if the pixel cache could not be written.
*/
static MagickBooleanType InsertRow(int bpp,unsigned char *p,ssize_t y,
  Image *image)
{
  ExceptionInfo
    *exception;

  int
    bit;

  ssize_t
    x;

  register PixelPacket
    *q;

  IndexPacket
    index;

  register IndexPacket
    *indexes;

  exception=(&image->exception);
  q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  indexes=GetAuthenticIndexQueue(image);
  switch (bpp)
  {
    case 1:  /* Convert bitmap scanline. */
      {
        /* 8 pixels per byte, MSB first */
        for (x=0; x < ((ssize_t) image->columns-7); x+=8)
        {
          for (bit=0; bit < 8; bit++)
          {
            index=((*p) & (0x80 >> bit) ? 0x01 : 0x00);
            SetPixelIndex(indexes+x+bit,index);
            if (index < image->colors)
              SetPixelRGBO(q,image->colormap+(ssize_t) index);
            q++;
          }
          p++;
        }
        if ((image->columns % 8) != 0)
          {
            /* trailing partial byte */
            for (bit=0; bit < (ssize_t) (image->columns % 8); bit++)
            {
              index=((*p) & (0x80 >> bit) ? 0x01 : 0x00);
              SetPixelIndex(indexes+x+bit,index);
              if (index < image->colors)
                SetPixelRGBO(q,image->colormap+(ssize_t) index);
              q++;
            }
            p++;
          }
        break;
      }
    case 2:  /* Convert PseudoColor scanline. */
      {
        if ((image->storage_class != PseudoClass) ||
            (indexes == (IndexPacket *) NULL))
          break;
        /* 4 pixels per byte, high bit-pair first.
           NOTE(review): the index offsets below (indexes+x three times, then
           indexes+x+1) look inconsistent with the q pointer advance — this
           mirrors the upstream code; confirm against upstream before changing. */
        for (x=0; x < ((ssize_t) image->columns-3); x+=4)
        {
          index=ConstrainColormapIndex(image,(*p >> 6) & 0x3);
          SetPixelIndex(indexes+x,index);
          if (index < image->colors)
            SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
          index=ConstrainColormapIndex(image,(*p >> 4) & 0x3);
          SetPixelIndex(indexes+x,index);
          if (index < image->colors)
            SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
          index=ConstrainColormapIndex(image,(*p >> 2) & 0x3);
          SetPixelIndex(indexes+x,index);
          if (index < image->colors)
            SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
          index=ConstrainColormapIndex(image,(*p) & 0x3);
          SetPixelIndex(indexes+x+1,index);
          if (index < image->colors)
            SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
          p++;
        }
        if ((image->columns % 4) != 0)
          {
            /* trailing partial byte: 1-3 leftover pixels */
            index=ConstrainColormapIndex(image,(*p >> 6) & 0x3);
            SetPixelIndex(indexes+x,index);
            if (index < image->colors)
              SetPixelRGBO(q,image->colormap+(ssize_t) index);
            q++;
            if ((image->columns % 4) > 1)
              {
                index=ConstrainColormapIndex(image,(*p >> 4) & 0x3);
                SetPixelIndex(indexes+x,index);
                if (index < image->colors)
                  SetPixelRGBO(q,image->colormap+(ssize_t) index);
                q++;
                if ((image->columns % 4) > 2)
                  {
                    index=ConstrainColormapIndex(image,(*p >> 2) & 0x3);
                    SetPixelIndex(indexes+x,index);
                    if (index < image->colors)
                      SetPixelRGBO(q,image->colormap+(ssize_t) index);
                    q++;
                  }
              }
            p++;
          }
        break;
      }
    case 4:  /* Convert PseudoColor scanline. */
      {
        /* 2 pixels per byte, high nibble first */
        for (x=0; x < ((ssize_t) image->columns-1); x+=2)
        {
          index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f);
          SetPixelIndex(indexes+x,index);
          if (index < image->colors)
            SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
          index=ConstrainColormapIndex(image,(*p) & 0x0f);
          SetPixelIndex(indexes+x+1,index);
          if (index < image->colors)
            SetPixelRGBO(q,image->colormap+(ssize_t) index);
          p++;
          q++;
        }
        if ((image->columns % 2) != 0)
          {
            /* trailing odd pixel in the high nibble */
            index=ConstrainColormapIndex(image,(*p >> 4) & 0x0f);
            SetPixelIndex(indexes+x,index);
            if (index < image->colors)
              SetPixelRGBO(q,image->colormap+(ssize_t) index);
            p++;
            q++;
          }
        break;
      }
    case 8:  /* Convert PseudoColor scanline. */
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          index=ConstrainColormapIndex(image,*p);
          SetPixelIndex(indexes+x,index);
          if (index < image->colors)
            SetPixelRGBO(q,image->colormap+(ssize_t) index);
          p++;
          q++;
        }
      }
      break;
    case 24:  /* Convert DirectColor scanline. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q,ScaleCharToQuantum(*p++));
        SetPixelGreen(q,ScaleCharToQuantum(*p++));
        SetPixelBlue(q,ScaleCharToQuantum(*p++));
        q++;
      }
      break;
  }
  if (!SyncAuthenticPixels(image,exception))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  GetCutColors() scans a grayed (R==G==B assumed by the caller) image and
  returns an estimate of the number of gray levels actually used:
  2, 16, or 255 (or the raw max intensity if below 16 levels scaled).
  Returns early with 255 as soon as intensity exceeds ScaleCharToQuantum(16).
*/
/* Compute the number of colors in Grayed R[i]=G[i]=B[i] image */
static int GetCutColors(Image *image)
{
  ExceptionInfo
    *exception;

  Quantum
    intensity,
    scale_intensity;

  register PixelPacket
    *q;

  ssize_t
    x,
    y;

  exception=(&image->exception);
  intensity=0;
  scale_intensity=ScaleCharToQuantum(16);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (intensity < GetPixelRed(q))
        intensity=GetPixelRed(q);
      if (intensity >= scale_intensity)
        return(255);
      q++;
    }
  }
  if (intensity < ScaleCharToQuantum(2))
    return(2);
  if (intensity < ScaleCharToQuantum(16))
    return(16);
  return((int) intensity);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% % % % % % % R e a d C U T I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadCUTImage() reads an CUT X image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadCUTImage method is: % % Image *ReadCUTImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadCUTImage(const ImageInfo *image_info,ExceptionInfo *exception) { #define ThrowCUTReaderException(severity,tag) \ { \ if (palette != NULL) \ palette=DestroyImage(palette); \ if (clone_info != NULL) \ clone_info=DestroyImageInfo(clone_info); \ ThrowReaderException(severity,tag); \ } Image *image,*palette; ImageInfo *clone_info; MagickBooleanType status; MagickOffsetType offset; size_t EncodedByte; unsigned char RunCount,RunValue,RunCountMasked; CUTHeader Header; CUTPalHeader PalHeader; ssize_t depth; ssize_t i,j; ssize_t ldblk; unsigned char *BImgBuff=NULL,*ptrB; PixelPacket *q; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read CUT image. 
*/ palette=NULL; clone_info=NULL; Header.Width=ReadBlobLSBShort(image); Header.Height=ReadBlobLSBShort(image); Header.Reserved=ReadBlobLSBShort(image); if (Header.Width==0 || Header.Height==0 || Header.Reserved!=0) CUT_KO: ThrowCUTReaderException(CorruptImageError,"ImproperImageHeader"); /*---This code checks first line of image---*/ EncodedByte=ReadBlobLSBShort(image); RunCount=(unsigned char) ReadBlobByte(image); RunCountMasked=RunCount & 0x7F; ldblk=0; while((int) RunCountMasked!=0) /*end of line?*/ { i=1; if((int) RunCount<0x80) i=(ssize_t) RunCountMasked; offset=SeekBlob(image,TellBlob(image)+i,SEEK_SET); if (offset < 0) ThrowCUTReaderException(CorruptImageError,"ImproperImageHeader"); if(EOFBlob(image) != MagickFalse) goto CUT_KO; /*wrong data*/ EncodedByte-=i+1; ldblk+=(ssize_t) RunCountMasked; RunCount=(unsigned char) ReadBlobByte(image); if(EOFBlob(image) != MagickFalse) goto CUT_KO; /*wrong data: unexpected eof in line*/ RunCountMasked=RunCount & 0x7F; } if(EncodedByte!=1) goto CUT_KO; /*wrong data: size incorrect*/ i=0; /*guess a number of bit planes*/ if(ldblk==(int) Header.Width) i=8; if(2*ldblk==(int) Header.Width) i=4; if(8*ldblk==(int) Header.Width) i=1; if(i==0) goto CUT_KO; /*wrong data: incorrect bit planes*/ depth=i; image->columns=Header.Width; image->rows=Header.Height; image->depth=8; image->colors=(size_t) (GetQuantumRange(1UL*i)+1); if (image_info->ping != MagickFalse) goto Finish; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* ----- Do something with palette ----- */ if ((clone_info=CloneImageInfo(image_info)) == NULL) goto NoPalette; i=(ssize_t) strlen(clone_info->filename); j=i; while(--i>0) { if(clone_info->filename[i]=='.') { break; } if(clone_info->filename[i]=='/' || clone_info->filename[i]=='\\' || clone_info->filename[i]==':' ) { i=j; break; } } (void) CopyMagickString(clone_info->filename+i,".PAL",(size_t) 
(MaxTextExtent-i)); if((clone_info->file=fopen_utf8(clone_info->filename,"rb"))==NULL) { (void) CopyMagickString(clone_info->filename+i,".pal",(size_t) (MaxTextExtent-i)); if((clone_info->file=fopen_utf8(clone_info->filename,"rb"))==NULL) { clone_info->filename[i]='\0'; if((clone_info->file=fopen_utf8(clone_info->filename,"rb"))==NULL) { clone_info=DestroyImageInfo(clone_info); clone_info=NULL; goto NoPalette; } } } if( (palette=AcquireImage(clone_info))==NULL ) goto NoPalette; status=OpenBlob(clone_info,palette,ReadBinaryBlobMode,exception); if (status == MagickFalse) { ErasePalette: palette=DestroyImage(palette); palette=NULL; goto NoPalette; } if(palette!=NULL) { (void) ReadBlob(palette,2,(unsigned char *) PalHeader.FileId); if(strncmp(PalHeader.FileId,"AH",2) != 0) goto ErasePalette; PalHeader.Version=ReadBlobLSBShort(palette); PalHeader.Size=ReadBlobLSBShort(palette); PalHeader.FileType=(char) ReadBlobByte(palette); PalHeader.SubType=(char) ReadBlobByte(palette); PalHeader.BoardID=ReadBlobLSBShort(palette); PalHeader.GraphicsMode=ReadBlobLSBShort(palette); PalHeader.MaxIndex=ReadBlobLSBShort(palette); PalHeader.MaxRed=ReadBlobLSBShort(palette); PalHeader.MaxGreen=ReadBlobLSBShort(palette); PalHeader.MaxBlue=ReadBlobLSBShort(palette); (void) ReadBlob(palette,20,(unsigned char *) PalHeader.PaletteId); if (EOFBlob(image)) ThrowCUTReaderException(CorruptImageError,"UnexpectedEndOfFile"); if(PalHeader.MaxIndex<1) goto ErasePalette; image->colors=PalHeader.MaxIndex+1; if (AcquireImageColormap(image,image->colors) == MagickFalse) goto NoMemory; if(PalHeader.MaxRed==0) PalHeader.MaxRed=(unsigned int) QuantumRange; /*avoid division by 0*/ if(PalHeader.MaxGreen==0) PalHeader.MaxGreen=(unsigned int) QuantumRange; if(PalHeader.MaxBlue==0) PalHeader.MaxBlue=(unsigned int) QuantumRange; for(i=0;i<=(int) PalHeader.MaxIndex;i++) { /*this may be wrong- I don't know why is palette such strange*/ j=(ssize_t) TellBlob(palette); if((j % 512)>512-6) { j=((j / 512)+1)*512; 
offset=SeekBlob(palette,j,SEEK_SET); if (offset < 0) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } image->colormap[i].red=(Quantum) ReadBlobLSBShort(palette); if (QuantumRange != (Quantum) PalHeader.MaxRed) { image->colormap[i].red=ClampToQuantum(((double) image->colormap[i].red*QuantumRange+(PalHeader.MaxRed>>1))/ PalHeader.MaxRed); } image->colormap[i].green=(Quantum) ReadBlobLSBShort(palette); if (QuantumRange != (Quantum) PalHeader.MaxGreen) { image->colormap[i].green=ClampToQuantum (((double) image->colormap[i].green*QuantumRange+(PalHeader.MaxGreen>>1))/PalHeader.MaxGreen); } image->colormap[i].blue=(Quantum) ReadBlobLSBShort(palette); if (QuantumRange != (Quantum) PalHeader.MaxBlue) { image->colormap[i].blue=ClampToQuantum (((double)image->colormap[i].blue*QuantumRange+(PalHeader.MaxBlue>>1))/PalHeader.MaxBlue); } } if (EOFBlob(image)) ThrowCUTReaderException(CorruptImageError,"UnexpectedEndOfFile"); } NoPalette: if(palette==NULL) { image->colors=256; if (AcquireImageColormap(image,image->colors) == MagickFalse) { NoMemory: ThrowCUTReaderException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t)image->colors; i++) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) i); image->colormap[i].green=ScaleCharToQuantum((unsigned char) i); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) i); } } /* ----- Load RLE compressed raster ----- */ BImgBuff=(unsigned char *) AcquireQuantumMemory((size_t) ldblk, sizeof(*BImgBuff)); /*Ldblk was set in the check phase*/ if(BImgBuff==NULL) goto NoMemory; (void) memset(BImgBuff,0,(size_t) ldblk*sizeof(*BImgBuff)); offset=SeekBlob(image,6 /*sizeof(Header)*/,SEEK_SET); if (offset < 0) { if (palette != NULL) palette=DestroyImage(palette); if (clone_info != NULL) clone_info=DestroyImageInfo(clone_info); BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } for (i=0; i < (int) Header.Height; i++) { 
EncodedByte=ReadBlobLSBShort(image); ptrB=BImgBuff; j=ldblk; RunCount=(unsigned char) ReadBlobByte(image); RunCountMasked=RunCount & 0x7F; while ((int) RunCountMasked != 0) { if((ssize_t) RunCountMasked>j) { /*Wrong Data*/ RunCountMasked=(unsigned char) j; if(j==0) { break; } } if((int) RunCount>0x80) { RunValue=(unsigned char) ReadBlobByte(image); (void) memset(ptrB,(int) RunValue,(size_t) RunCountMasked); } else { (void) ReadBlob(image,(size_t) RunCountMasked,ptrB); } ptrB+=(int) RunCountMasked; j-=(int) RunCountMasked; if (EOFBlob(image) != MagickFalse) goto Finish; /* wrong data: unexpected eof in line */ RunCount=(unsigned char) ReadBlobByte(image); RunCountMasked=RunCount & 0x7F; } InsertRow(depth,BImgBuff,i,image); } (void) SyncImage(image); /*detect monochrome image*/ if(palette==NULL) { /*attempt to detect binary (black&white) images*/ if ((image->storage_class == PseudoClass) && (SetImageGray(image,&image->exception) != MagickFalse)) { if(GetCutColors(image)==2) { for (i=0; i < (ssize_t)image->colors; i++) { register Quantum sample; sample=ScaleCharToQuantum((unsigned char) i); if(image->colormap[i].red!=sample) goto Finish; if(image->colormap[i].green!=sample) goto Finish; if(image->colormap[i].blue!=sample) goto Finish; } image->colormap[1].red=image->colormap[1].green= image->colormap[1].blue=QuantumRange; for (i=0; i < (ssize_t)image->rows; i++) { q=QueueAuthenticPixels(image,0,i,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; for (j=0; j < (ssize_t)image->columns; j++) { if (GetPixelRed(q) == ScaleCharToQuantum(1)) { SetPixelRed(q,QuantumRange); SetPixelGreen(q,QuantumRange); SetPixelBlue(q,QuantumRange); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) goto Finish; } } } } Finish: if (BImgBuff != NULL) BImgBuff=(unsigned char *) RelinquishMagickMemory(BImgBuff); if (palette != NULL) palette=DestroyImage(palette); if (clone_info != NULL) clone_info=DestroyImageInfo(clone_info); if (EOFBlob(image) != MagickFalse) 
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r C U T I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterCUTImage() adds attributes for the CUT image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterCUTImage method is: % % size_t RegisterCUTImage(void) % */ ModuleExport size_t RegisterCUTImage(void) { MagickInfo *entry; entry=SetMagickInfo("CUT"); entry->decoder=(DecodeImageHandler *) ReadCUTImage; entry->seekable_stream=MagickTrue; entry->description=ConstantString("DR Halo"); entry->module=ConstantString("CUT"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r C U T I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterCUTImage() removes format registrations made by the % CUT module from the list of supported formats. % % The format of the UnregisterCUTImage method is: % % UnregisterCUTImage(void) % */ ModuleExport void UnregisterCUTImage(void) { (void) UnregisterMagickInfo("CUT"); }
./CrossVul/dataset_final_sorted/CWE-20/c/good_907_0
crossvul-cpp_data_bad_254_0
/**
 * @file
 * Usenet network mailbox type; talk to an NNTP server
 *
 * @authors
 * Copyright (C) 1998 Brandon Long <blong@fiction.net>
 * Copyright (C) 1999 Andrej Gritsenko <andrej@lucky.net>
 * Copyright (C) 2000-2017 Vsevolod Volkov <vvv@mutt.org.ua>
 *
 * @copyright
 * This program is free software: you can redistribute it and/or modify it under
 * the terms of the GNU General Public License as published by the Free Software
 * Foundation, either version 2 of the License, or (at your option) any later
 * version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @page nntp Usenet network mailbox type; talk to an NNTP server
 *
 * Usenet network mailbox type; talk to an NNTP server
 */

#include "config.h"
#include <ctype.h>
#include <limits.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include "mutt/mutt.h"
#include "conn/conn.h"
#include "mutt.h"
#include "nntp.h"
#include "bcache.h"
#include "body.h"
#include "context.h"
#include "envelope.h"
#include "globals.h"
#include "header.h"
#include "mailbox.h"
#include "mutt_account.h"
#include "mutt_curses.h"
#include "mutt_logging.h"
#include "mutt_socket.h"
#include "mx.h"
#include "ncrypt/ncrypt.h"
#include "options.h"
#include "progress.h"
#include "protos.h"
#include "thread.h"
#include "url.h"
#ifdef USE_HCACHE
#include "hcache/hcache.h"
#endif
#ifdef USE_SASL
#include <sasl/sasl.h>
#include <sasl/saslutil.h>
#endif

struct NntpServer *CurrentNewsSrv;

/**
 * nntp_connect_error - Signal a failed connection
 * @param nserv NNTP server
 * @retval -1 Always
 */
static int nntp_connect_error(struct NntpServer *nserv)
{
  nserv->status = NNTP_NONE;
  mutt_error(_("Server closed connection!"));
  return -1;
}

/**
 * nntp_capabilities - Get capabilities
 * @param nserv NNTP server
 * @retval -1 Error, connection is closed
 * @retval  0 Mode is reader, capabilities set up
 * @retval  1 Need to switch to reader mode
 */
static int nntp_capabilities(struct NntpServer *nserv)
{
  struct Connection *conn = nserv->conn;
  bool mode_reader = false;
  char buf[LONG_STRING];
  char authinfo[LONG_STRING] = "";

  /* reset all cached capability flags before re-probing */
  nserv->hasCAPABILITIES = false;
  nserv->hasSTARTTLS = false;
  nserv->hasDATE = false;
  nserv->hasLIST_NEWSGROUPS = false;
  nserv->hasLISTGROUP = false;
  nserv->hasLISTGROUPrange = false;
  nserv->hasOVER = false;
  FREE(&nserv->authenticators);

  if (mutt_socket_send(conn, "CAPABILITIES\r\n") < 0 ||
      mutt_socket_readln(buf, sizeof(buf), conn) < 0)
  {
    return nntp_connect_error(nserv);
  }

  /* no capabilities */
  if (mutt_str_strncmp("101", buf, 3) != 0)
    return 1;
  nserv->hasCAPABILITIES = true;

  /* parse capabilities, one per line, terminated by "." */
  do
  {
    if (mutt_socket_readln(buf, sizeof(buf), conn) < 0)
      return nntp_connect_error(nserv);
    if (mutt_str_strcmp("STARTTLS", buf) == 0)
      nserv->hasSTARTTLS = true;
    else if (mutt_str_strcmp("MODE-READER", buf) == 0)
      mode_reader = true;
    else if (mutt_str_strcmp("READER", buf) == 0)
    {
      nserv->hasDATE = true;
      nserv->hasLISTGROUP = true;
      nserv->hasLISTGROUPrange = true;
    }
    else if (mutt_str_strncmp("AUTHINFO ", buf, 9) == 0)
    {
      /* trailing space lets later " USER " / " SASL " substring checks match
       * a mechanism at the end of the list */
      mutt_str_strcat(buf, sizeof(buf), " ");
      mutt_str_strfcpy(authinfo, buf + 8, sizeof(authinfo));
    }
#ifdef USE_SASL
    else if (mutt_str_strncmp("SASL ", buf, 5) == 0)
    {
      char *p = buf + 5;
      while (*p == ' ')
        p++;
      nserv->authenticators = mutt_str_strdup(p);
    }
#endif
    else if (mutt_str_strcmp("OVER", buf) == 0)
      nserv->hasOVER = true;
    else if (mutt_str_strncmp("LIST ", buf, 5) == 0)
    {
      char *p = strstr(buf, " NEWSGROUPS");
      if (p)
      {
        p += 11;
        if (*p == '\0' || *p == ' ')
          nserv->hasLIST_NEWSGROUPS = true;
      }
    }
  } while (mutt_str_strcmp(".", buf) != 0);
  *buf = '\0';
#ifdef USE_SASL
  if (nserv->authenticators && strcasestr(authinfo, " SASL "))
    mutt_str_strfcpy(buf, nserv->authenticators, sizeof(buf));
#endif
  if (strcasestr(authinfo, " USER "))
  {
    if (*buf)
      mutt_str_strcat(buf, sizeof(buf), " ");
    mutt_str_strcat(buf, sizeof(buf), "USER");
  }
  mutt_str_replace(&nserv->authenticators, buf);

  /* current mode is reader */
  if (nserv->hasDATE)
    return 0;

  /* server is mode-switching, need to switch to reader mode */
  if (mode_reader)
    return 1;

  mutt_socket_close(conn);
  nserv->status = NNTP_BYE;
  mutt_error(_("Server doesn't support reader mode."));
  return -1;
}

/* Default overview format (NUL-separated list of header field names),
 * used when the server does not provide LIST OVERVIEW.FMT */
char *OverviewFmt = "Subject:\0"
                    "From:\0"
                    "Date:\0"
                    "Message-ID:\0"
                    "References:\0"
                    "Content-Length:\0"
                    "Lines:\0"
                    "\0";

/**
 * nntp_attempt_features - Detect supported commands
 * @param nserv NNTP server
 * @retval  0 Success
 * @retval -1 Failure
 */
static int nntp_attempt_features(struct NntpServer *nserv)
{
  struct Connection *conn = nserv->conn;
  char buf[LONG_STRING];

  /* no CAPABILITIES, trying DATE, LISTGROUP, LIST NEWSGROUPS */
  if (!nserv->hasCAPABILITIES)
  {
    if (mutt_socket_send(conn, "DATE\r\n") < 0 ||
        mutt_socket_readln(buf, sizeof(buf), conn) < 0)
    {
      return nntp_connect_error(nserv);
    }
    if (mutt_str_strncmp("500", buf, 3) != 0)
      nserv->hasDATE = true;

    if (mutt_socket_send(conn, "LISTGROUP\r\n") < 0 ||
        mutt_socket_readln(buf, sizeof(buf), conn) < 0)
    {
      return nntp_connect_error(nserv);
    }
    if (mutt_str_strncmp("500", buf, 3) != 0)
      nserv->hasLISTGROUP = true;

    if (mutt_socket_send(conn, "LIST NEWSGROUPS +\r\n") < 0 ||
        mutt_socket_readln(buf, sizeof(buf), conn) < 0)
    {
      return nntp_connect_error(nserv);
    }
    if (mutt_str_strncmp("500", buf, 3) != 0)
      nserv->hasLIST_NEWSGROUPS = true;
    if (mutt_str_strncmp("215", buf, 3) == 0)
    {
      /* drain the multi-line response up to the "." terminator */
      do
      {
        if (mutt_socket_readln(buf, sizeof(buf), conn) < 0)
          return nntp_connect_error(nserv);
      } while (mutt_str_strcmp(".", buf) != 0);
    }
  }

  /* no LIST NEWSGROUPS, trying XGTITLE */
  if (!nserv->hasLIST_NEWSGROUPS)
  {
    if (mutt_socket_send(conn, "XGTITLE\r\n") < 0 ||
        mutt_socket_readln(buf, sizeof(buf), conn) < 0)
    {
      return nntp_connect_error(nserv);
    }
    if (mutt_str_strncmp("500", buf, 3) != 0)
      nserv->hasXGTITLE = true;
  }

  /* no OVER, trying XOVER */
  if (!nserv->hasOVER)
  {
    if (mutt_socket_send(conn, "XOVER\r\n") < 0 ||
        mutt_socket_readln(buf, sizeof(buf), conn) < 0)
    {
      return nntp_connect_error(nserv);
    }
    if (mutt_str_strncmp("500", buf, 3) != 0)
      nserv->hasXOVER = true;
  }

  /* trying LIST OVERVIEW.FMT */
  if (nserv->hasOVER || nserv->hasXOVER)
  {
    if (mutt_socket_send(conn, "LIST OVERVIEW.FMT\r\n") < 0 ||
        mutt_socket_readln(buf, sizeof(buf), conn) < 0)
    {
      return nntp_connect_error(nserv);
    }
    if (mutt_str_strncmp("215", buf, 3) != 0)
      nserv->overview_fmt = OverviewFmt;
    else
    {
      /* Parse the server's overview format into a NUL-separated list in
       * overview_fmt, normalising "Field:full" to "Field:" and "Bytes:"
       * to "Content-Length:".
       * NOTE(review): this hand-rolled growing-buffer parse of
       * server-controlled data is delicate (off/b bookkeeping across
       * realloc and line continuation); audit against the current
       * upstream NeoMutt version before reuse. */
      int cont = 0;
      size_t buflen = 2 * LONG_STRING, off = 0, b = 0;

      if (nserv->overview_fmt)
        FREE(&nserv->overview_fmt);
      nserv->overview_fmt = mutt_mem_malloc(buflen);

      while (true)
      {
        if (buflen - off < LONG_STRING)
        {
          buflen *= 2;
          mutt_mem_realloc(&nserv->overview_fmt, buflen);
        }

        const int chunk = mutt_socket_readln(nserv->overview_fmt + off, buflen - off, conn);
        if (chunk < 0)
        {
          FREE(&nserv->overview_fmt);
          return nntp_connect_error(nserv);
        }

        if (!cont && (mutt_str_strcmp(".", nserv->overview_fmt + off) == 0))
          break;

        /* cont != 0 means the line was longer than the buffer slice and
         * continues in the next read */
        cont = chunk >= buflen - off ? 1 : 0;
        off += strlen(nserv->overview_fmt + off);
        if (!cont)
        {
          char *colon = NULL;

          if (nserv->overview_fmt[b] == ':')
          {
            /* move a leading ':' to the end of the field name */
            memmove(nserv->overview_fmt + b, nserv->overview_fmt + b + 1, off - b - 1);
            nserv->overview_fmt[off - 1] = ':';
          }
          colon = strchr(nserv->overview_fmt + b, ':');
          if (!colon)
            nserv->overview_fmt[off++] = ':';
          else if (strcmp(colon + 1, "full") != 0)
            off = colon + 1 - nserv->overview_fmt;
          if (strcasecmp(nserv->overview_fmt + b, "Bytes:") == 0)
          {
            size_t len = strlen(nserv->overview_fmt + b);
            mutt_str_strfcpy(nserv->overview_fmt + b, "Content-Length:", len + 1);
            off = b + len;
          }
          nserv->overview_fmt[off++] = '\0';
          b = off;
        }
      }
      nserv->overview_fmt[off++] = '\0';
      mutt_mem_realloc(&nserv->overview_fmt, off);
    }
  }
  return 0;
}

/**
 * nntp_auth - Get login, password and authenticate
 * @param nserv NNTP server
 * @retval  0 Success
 * @retval -1 Failure
 */
static int nntp_auth(struct NntpServer *nserv)
{
  struct Connection *conn = nserv->conn;
  char buf[LONG_STRING];
  char authenticators[LONG_STRING] = "USER";
  char *method = NULL, *a = NULL, *p = NULL;
  unsigned char flags = conn->account.flags;

  while (true)
  {
    /* get login and password */
    if ((mutt_account_getuser(&conn->account) < 0) || (conn->account.user[0] == '\0') ||
        (mutt_account_getpass(&conn->account) < 0) || (conn->account.pass[0] == '\0'))
    {
      break;
    }

    /* get list of authenticators */
    if (NntpAuthenticators && *NntpAuthenticators)
      mutt_str_strfcpy(authenticators, NntpAuthenticators, sizeof(authenticators));
    else if (nserv->hasCAPABILITIES)
    {
      mutt_str_strfcpy(authenticators, NONULL(nserv->authenticators), sizeof(authenticators));
      /* convert the space-separated server list to the ':'-separated
       * form used below */
      p = authenticators;
      while (*p)
      {
        if (*p == ' ')
          *p = ':';
        p++;
      }
    }
    p = authenticators;
    while (*p)
    {
      *p = toupper(*p);
      p++;
    }

    mutt_debug(1, "available methods: %s\n", nserv->authenticators);
    a = authenticators;
    while (true)
    {
      if (!a)
      {
        mutt_error(_("No authenticators available"));
        break;
      }

      method = a;
      a = strchr(a, ':');
      if (a)
        *a++ = '\0';

      /* check authenticator: skip any method the server did not advertise */
      if (nserv->hasCAPABILITIES)
      {
        char *m = NULL;

        if (!nserv->authenticators)
          continue;
        m = strcasestr(nserv->authenticators, method);
        if (!m)
          continue;
        if (m > nserv->authenticators && *(m - 1) != ' ')
          continue;
        m += strlen(method);
        if (*m != '\0' && *m != ' ')
          continue;
      }
      mutt_debug(1, "trying method %s\n", method);

      /* AUTHINFO USER authentication */
      if (strcmp(method, "USER") == 0)
      {
        mutt_message(_("Authenticating (%s)..."), method);
        snprintf(buf, sizeof(buf), "AUTHINFO USER %s\r\n", conn->account.user);
        if (mutt_socket_send(conn, buf) < 0 ||
            mutt_socket_readln(buf, sizeof(buf), conn) < 0)
        {
          break;
        }

        /* authenticated, password is not required */
        if (mutt_str_strncmp("281", buf, 3) == 0)
          return 0;

        /* username accepted, sending password */
        if (mutt_str_strncmp("381", buf, 3) == 0)
        {
          /* never log the actual password */
          if (DebugLevel < MUTT_SOCK_LOG_FULL)
            mutt_debug(MUTT_SOCK_LOG_CMD, "%d> AUTHINFO PASS *\n", conn->fd);
          snprintf(buf, sizeof(buf), "AUTHINFO PASS %s\r\n", conn->account.pass);
          if (mutt_socket_send_d(conn, buf, MUTT_SOCK_LOG_FULL) < 0 ||
              mutt_socket_readln(buf, sizeof(buf), conn) < 0)
          {
            break;
          }

          /* authenticated */
          if (mutt_str_strncmp("281", buf, 3) == 0)
            return 0;
        }

        /* server doesn't support AUTHINFO USER, trying next method */
        if (*buf == '5')
          continue;
      }
      else
      {
#ifdef USE_SASL
        sasl_conn_t *saslconn = NULL;
        sasl_interact_t *interaction = NULL;
        int rc;
        char inbuf[LONG_STRING] = "";
        const char *mech = NULL;
        const char *client_out = NULL;
        unsigned int client_len, len;

        if (mutt_sasl_client_new(conn, &saslconn) < 0)
        {
          mutt_debug(1, "error allocating SASL connection.\n");
          continue;
        }
        while (true)
        {
          rc = sasl_client_start(saslconn, method, &interaction, &client_out, &client_len, &mech);
          if (rc != SASL_INTERACT)
            break;
          mutt_sasl_interact(interaction);
        }
        if (rc != SASL_OK && rc != SASL_CONTINUE)
        {
          sasl_dispose(&saslconn);
          mutt_debug(1, "error starting SASL authentication exchange.\n");
          continue;
        }

        mutt_message(_("Authenticating (%s)..."), method);
        snprintf(buf, sizeof(buf), "AUTHINFO SASL %s", method);

        /* looping protocol */
        while (rc == SASL_CONTINUE || (rc == SASL_OK && client_len))
        {
          /* send out client response */
          if (client_len)
          {
            if (DebugLevel >= MUTT_SOCK_LOG_FULL)
            {
              /* dump the raw client response with NULs made printable */
              char tmp[LONG_STRING];
              memcpy(tmp, client_out, client_len);
              for (p = tmp; p < tmp + client_len; p++)
              {
                if (*p == '\0')
                  *p = '.';
              }
              *p = '\0';
              mutt_debug(1, "SASL> %s\n", tmp);
            }

            if (*buf)
              mutt_str_strcat(buf, sizeof(buf), " ");
            len = strlen(buf);
            if (sasl_encode64(client_out, client_len, buf + len, sizeof(buf) - len, &len) != SASL_OK)
            {
              mutt_debug(1, "error base64-encoding client response.\n");
              break;
            }
          }

          mutt_str_strcat(buf, sizeof(buf), "\r\n");
          if (DebugLevel < MUTT_SOCK_LOG_FULL)
          {
            if (strchr(buf, ' '))
            {
              mutt_debug(MUTT_SOCK_LOG_CMD, "%d> AUTHINFO SASL %s%s\n", conn->fd,
                         method, client_len ? " sasl_data" : "");
            }
            else
              mutt_debug(MUTT_SOCK_LOG_CMD, "%d> sasl_data\n", conn->fd);
          }
          client_len = 0;

          if (mutt_socket_send_d(conn, buf, MUTT_SOCK_LOG_FULL) < 0 ||
              mutt_socket_readln_d(inbuf, sizeof(inbuf), conn, MUTT_SOCK_LOG_FULL) < 0)
          {
            break;
          }

          /* only 283 (success) and 383 (continue) carry SASL data */
          if ((mutt_str_strncmp(inbuf, "283 ", 4) != 0) &&
              (mutt_str_strncmp(inbuf, "383 ", 4) != 0))
          {
            if (DebugLevel < MUTT_SOCK_LOG_FULL)
              mutt_debug(MUTT_SOCK_LOG_CMD, "%d< %s\n", conn->fd, inbuf);
            break;
          }

          if (DebugLevel < MUTT_SOCK_LOG_FULL)
          {
            inbuf[3] = '\0';
            mutt_debug(MUTT_SOCK_LOG_CMD, "%d< %s sasl_data\n", conn->fd, inbuf);
          }

          if (strcmp("=", inbuf + 4) == 0)
            len = 0;
          else if (sasl_decode64(inbuf + 4, strlen(inbuf + 4), buf, sizeof(buf) - 1, &len) != SASL_OK)
          {
            mutt_debug(1, "error base64-decoding server response.\n");
            break;
          }
          else if (DebugLevel >= MUTT_SOCK_LOG_FULL)
          {
            char tmp[LONG_STRING];
            memcpy(tmp, buf, len);
            for (p = tmp; p < tmp + len; p++)
            {
              if (*p == '\0')
                *p = '.';
            }
            *p = '\0';
            mutt_debug(1, "SASL< %s\n", tmp);
          }

          while (true)
          {
            rc = sasl_client_step(saslconn, buf, len, &interaction, &client_out, &client_len);
            if (rc != SASL_INTERACT)
              break;
            mutt_sasl_interact(interaction);
          }
          if (*inbuf != '3')
            break;

          *buf = '\0';
        } /* looping protocol */

        if (rc == SASL_OK && client_len == 0 && *inbuf == '2')
        {
          mutt_sasl_setup_conn(conn, saslconn);
          return 0;
        }

        /* terminate SASL session */
        sasl_dispose(&saslconn);
        if (conn->fd < 0)
          break;
        if (mutt_str_strncmp(inbuf, "383 ", 4) == 0)
        {
          /* abort the pending exchange with "*" per RFC 4643 */
          if (mutt_socket_send(conn, "*\r\n") < 0 ||
              mutt_socket_readln(inbuf, sizeof(inbuf), conn) < 0)
          {
            break;
          }
        }

        /* server doesn't support AUTHINFO SASL, trying next method */
        if (*inbuf == '5')
          continue;
#else
        continue;
#endif /* USE_SASL */
      }

      mutt_error(_("%s authentication failed."), method);
      break;
    }
    break;
  }

  /* error */
  nserv->status = NNTP_BYE;
  conn->account.flags = flags;
  if (conn->fd < 0)
  {
    mutt_error(_("Server closed connection!"));
  }
  else
    mutt_socket_close(conn);

  return -1;
}

/**
 * nntp_open_connection - Connect to server, authenticate and get capabilities
 * @param nserv NNTP server
 * @retval  0 Success
 * @retval -1 Failure
 */
int nntp_open_connection(struct NntpServer *nserv)
{
  struct Connection *conn = nserv->conn;
  char buf[STRING];
  int cap;
  bool posting = false, auth = true;

  if (nserv->status == NNTP_OK)
    return 0;
  if (nserv->status == NNTP_BYE)
    return -1;
  nserv->status = NNTP_NONE;

  if (mutt_socket_open(conn) < 0)
    return -1;

  if (mutt_socket_readln(buf, sizeof(buf), conn) < 0)
    return nntp_connect_error(nserv);

  /* greeting: 200 = posting allowed, 201 = no posting, anything else fails */
  if (mutt_str_strncmp("200", buf, 3) == 0)
    posting = true;
  else if (mutt_str_strncmp("201", buf, 3) != 0)
  {
    mutt_socket_close(conn);
    mutt_str_remove_trailing_ws(buf);
    mutt_error("%s", buf);
    return -1;
  }

  /* get initial capabilities */
  cap = nntp_capabilities(nserv);
  if (cap < 0)
    return -1;

  /* tell news server to switch to mode reader if it isn't so */
  if (cap > 0)
  {
    if (mutt_socket_send(conn, "MODE READER\r\n") < 0 ||
        mutt_socket_readln(buf, sizeof(buf), conn) < 0)
    {
      return nntp_connect_error(nserv);
    }

    if (mutt_str_strncmp("200", buf, 3) == 0)
      posting = true;
    else if (mutt_str_strncmp("201", buf, 3) == 0)
      posting = false;
    /* error if has capabilities, ignore result if no capabilities */
    else if (nserv->hasCAPABILITIES)
    {
      mutt_socket_close(conn);
      mutt_error(_("Could not switch to reader mode."));
      return -1;
    }

    /* recheck capabilities after MODE READER */
    if (nserv->hasCAPABILITIES)
    {
      cap = nntp_capabilities(nserv);
      if (cap < 0)
        return -1;
    }
  }

  mutt_message(_("Connected to %s. %s"), conn->account.host,
               posting ? _("Posting is ok.") : _("Posting is NOT ok."));
  mutt_sleep(1);

#ifdef USE_SSL
  /* Attempt STARTTLS if available and desired. */
  if (nserv->use_tls != 1 && (nserv->hasSTARTTLS || SslForceTls))
  {
    if (nserv->use_tls == 0)
    {
      nserv->use_tls =
          SslForceTls || query_quadoption(SslStarttls,
                                          _("Secure connection with TLS?")) == MUTT_YES ?
              2 :
              1;
    }
    if (nserv->use_tls == 2)
    {
      if (mutt_socket_send(conn, "STARTTLS\r\n") < 0 ||
          mutt_socket_readln(buf, sizeof(buf), conn) < 0)
      {
        return nntp_connect_error(nserv);
      }
      if (mutt_str_strncmp("382", buf, 3) != 0)
      {
        nserv->use_tls = 0;
        mutt_error("STARTTLS: %s", buf);
      }
      else if (mutt_ssl_starttls(conn))
      {
        nserv->use_tls = 0;
        nserv->status = NNTP_NONE;
        mutt_socket_close(nserv->conn);
        mutt_error(_("Could not negotiate TLS connection"));
        return -1;
      }
      else
      {
        /* recheck capabilities after STARTTLS */
        cap = nntp_capabilities(nserv);
        if (cap < 0)
          return -1;
      }
    }
  }
#endif

  /* authentication required? */
  if (conn->account.flags & MUTT_ACCT_USER)
  {
    if (!conn->account.user[0])
      auth = false;
  }
  else
  {
    /* probe with STAT: 480 means the server demands authentication */
    if (mutt_socket_send(conn, "STAT\r\n") < 0 ||
        mutt_socket_readln(buf, sizeof(buf), conn) < 0)
    {
      return nntp_connect_error(nserv);
    }
    if (mutt_str_strncmp("480", buf, 3) != 0)
      auth = false;
  }

  /* authenticate */
  if (auth && nntp_auth(nserv) < 0)
    return -1;

  /* get final capabilities after authentication */
  if (nserv->hasCAPABILITIES && (auth || cap > 0))
  {
    cap = nntp_capabilities(nserv);
    if (cap < 0)
      return -1;
    if (cap > 0)
    {
      mutt_socket_close(conn);
      mutt_error(_("Could not switch to reader mode."));
      return -1;
    }
  }

  /* attempt features */
  if (nntp_attempt_features(nserv) < 0)
    return -1;

  nserv->status = NNTP_OK;
  return 0;
}

/**
 * nntp_query - Send data from buffer and receive answer to same buffer
 * @param nntp_data NNTP server data
 * @param line      Buffer containing data
 * @param linelen   Length of buffer
 * @retval  0 Success
 * @retval -1 Failure
 */
static int nntp_query(struct NntpData *nntp_data, char *line, size_t linelen)
{
  struct NntpServer *nserv = nntp_data->nserv;
  char buf[LONG_STRING] = { 0 };

  if (nserv->status == NNTP_BYE)
    return -1;

  while (true)
  {
    if (nserv->status == NNTP_OK)
    {
      int rc = 0;

      if (*line)
        rc = mutt_socket_send(nserv->conn, line);
      else if (nntp_data->group)
      {
        snprintf(buf, sizeof(buf), "GROUP %s\r\n", nntp_data->group);
        rc = mutt_socket_send(nserv->conn, buf);
      }
      if (rc >= 0)
        rc = mutt_socket_readln(buf, sizeof(buf), nserv->conn);
      if (rc >= 0)
        break;
    }

    /* reconnect */
    while (true)
    {
      nserv->status = NNTP_NONE;
      if (nntp_open_connection(nserv) == 0)
        break;

      snprintf(buf, sizeof(buf), _("Connection to %s lost. 
Reconnect?"), nserv->conn->account.host); if (mutt_yesorno(buf, MUTT_YES) != MUTT_YES) { nserv->status = NNTP_BYE; return -1; } } /* select newsgroup after reconnection */ if (nntp_data->group) { snprintf(buf, sizeof(buf), "GROUP %s\r\n", nntp_data->group); if (mutt_socket_send(nserv->conn, buf) < 0 || mutt_socket_readln(buf, sizeof(buf), nserv->conn) < 0) { return nntp_connect_error(nserv); } } if (!*line) break; } mutt_str_strfcpy(line, buf, linelen); return 0; } /** * nntp_fetch_lines - Read lines, calling a callback function for each * @param nntp_data NNTP server data * @param query Query to match * @param qlen Length of query * @param msg Progress message (OPTIONAL) * @param funct Callback function * @param data Data for callback function * @retval 0 Success * @retval 1 Bad response (answer in query buffer) * @retval -1 Connection lost * @retval -2 Error in funct(*line, *data) * * This function calls funct(*line, *data) for each received line, * funct(NULL, *data) if rewind(*data) needs, exits when fail or done: */ static int nntp_fetch_lines(struct NntpData *nntp_data, char *query, size_t qlen, const char *msg, int (*funct)(char *, void *), void *data) { int done = false; int rc; while (!done) { char buf[LONG_STRING]; char *line = NULL; unsigned int lines = 0; size_t off = 0; struct Progress progress; if (msg) mutt_progress_init(&progress, msg, MUTT_PROGRESS_MSG, ReadInc, 0); mutt_str_strfcpy(buf, query, sizeof(buf)); if (nntp_query(nntp_data, buf, sizeof(buf)) < 0) return -1; if (buf[0] != '2') { mutt_str_strfcpy(query, buf, qlen); return 1; } line = mutt_mem_malloc(sizeof(buf)); rc = 0; while (true) { char *p = NULL; int chunk = mutt_socket_readln_d(buf, sizeof(buf), nntp_data->nserv->conn, MUTT_SOCK_LOG_HDR); if (chunk < 0) { nntp_data->nserv->status = NNTP_NONE; break; } p = buf; if (!off && buf[0] == '.') { if (buf[1] == '\0') { done = true; break; } if (buf[1] == '.') p++; } mutt_str_strfcpy(line + off, p, sizeof(buf)); /* line longer than the socket buffer: accumulate and keep reading */ if (chunk >= sizeof(buf)) off += 
strlen(p); else { if (msg) mutt_progress_update(&progress, ++lines, -1); if (rc == 0 && funct(line, data) < 0) rc = -2; off = 0; } mutt_mem_realloc(&line, off + sizeof(buf)); } FREE(&line); funct(NULL, data); } return rc; } /** * fetch_description - Parse newsgroup description * @param line String to parse * @param data NNTP Server * @retval 0 Always */ static int fetch_description(char *line, void *data) { struct NntpServer *nserv = data; struct NntpData *nntp_data = NULL; char *desc = NULL; if (!line) return 0; desc = strpbrk(line, " \t"); if (desc) { *desc++ = '\0'; desc += strspn(desc, " \t"); } else desc = strchr(line, '\0'); /* look up the group; replace its cached description only when it changed */ nntp_data = mutt_hash_find(nserv->groups_hash, line); if (nntp_data && (mutt_str_strcmp(desc, nntp_data->desc) != 0)) { mutt_str_replace(&nntp_data->desc, desc); mutt_debug(2, "group: %s, desc: %s\n", line, desc); } return 0; } /** * get_description - Fetch newsgroups descriptions * @param nntp_data NNTP data * @param wildmat String to match * @param msg Progress message * @retval 0 Success * @retval 1 Bad response (answer in query buffer) * @retval -1 Connection lost * @retval -2 Error */ static int get_description(struct NntpData *nntp_data, char *wildmat, char *msg) { char buf[STRING]; char *cmd = NULL; /* get newsgroup description, if possible */ struct NntpServer *nserv = nntp_data->nserv; if (!wildmat) wildmat = nntp_data->group; if (nserv->hasLIST_NEWSGROUPS) cmd = "LIST NEWSGROUPS"; else if (nserv->hasXGTITLE) cmd = "XGTITLE"; else return 0; snprintf(buf, sizeof(buf), "%s %s\r\n", cmd, wildmat); int rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), msg, fetch_description, nserv); if (rc > 0) { mutt_error("%s: %s", cmd, buf); } return rc; } /** * nntp_parse_xref - Parse cross-reference * @param ctx Mailbox * @param hdr Email header * * Update read flag and set article number if empty */ static void nntp_parse_xref(struct Context *ctx, struct Header *hdr) { struct NntpData *nntp_data = ctx->data; char *buf = 
mutt_str_strdup(hdr->env->xref); char *p = buf; /* hdr->env->xref may be NULL; mutt_str_strdup(NULL) presumably yields NULL and the loop is skipped - TODO confirm */ while (p) { anum_t anum; /* skip to next word */ p += strspn(p, " \t"); char *grp = p; /* skip to end of word */ p = strpbrk(p, " \t"); if (p) *p++ = '\0'; /* find colon */ char *colon = strchr(grp, ':'); if (!colon) continue; *colon++ = '\0'; if (sscanf(colon, ANUM, &anum) != 1) continue; nntp_article_status(ctx, hdr, grp, anum); if (!NHDR(hdr)->article_num && (mutt_str_strcmp(nntp_data->group, grp) == 0)) NHDR(hdr)->article_num = anum; } FREE(&buf); } /** * fetch_tempfile - Write line to temporary file * @param line Text to write * @param data FILE pointer * @retval 0 Success * @retval -1 Failure */ static int fetch_tempfile(char *line, void *data) { FILE *fp = data; if (!line) rewind(fp); else if (fputs(line, fp) == EOF || fputc('\n', fp) == EOF) return -1; return 0; } /** * struct FetchCtx - Keep track when getting data from a server */ struct FetchCtx { struct Context *ctx; anum_t first; anum_t last; int restore; unsigned char *messages; struct Progress progress; #ifdef USE_HCACHE header_cache_t *hc; #endif }; /** * fetch_numbers - Parse article number * @param line Article number * @param data FetchCtx * @retval 0 Always */ static int fetch_numbers(char *line, void *data) { struct FetchCtx *fc = data; anum_t anum; if (!line) return 0; if (sscanf(line, ANUM, &anum) != 1) return 0; if (anum < fc->first || anum > fc->last) return 0; fc->messages[anum - fc->first] = 1; return 0; } /** * parse_overview_line - Parse overview line * @param line String to parse * @param data FetchCtx * @retval 0 Success * @retval -1 Failure */ static int parse_overview_line(char *line, void *data) { struct FetchCtx *fc = data; struct Context *ctx = fc->ctx; struct NntpData *nntp_data = ctx->data; struct Header *hdr = NULL; char *header = NULL, *field = NULL; bool save = true; anum_t anum; if (!line) return 0; /* parse article number */ field = strchr(line, '\t'); if (field) *field++ = '\0'; if (sscanf(line, ANUM, &anum) != 1) return 0; 
mutt_debug(2, "" ANUM "\n", anum); /* out of bounds */ if (anum < fc->first || anum > fc->last) return 0; /* not in LISTGROUP */ if (!fc->messages[anum - fc->first]) { /* progress */ if (!ctx->quiet) mutt_progress_update(&fc->progress, anum - fc->first + 1, -1); return 0; } /* convert overview line to header */ FILE *fp = mutt_file_mkstemp(); if (!fp) return -1; /* NOTE(review): the loop below advances 'header' through overview_fmt once per tab-separated field; if the server returns more fields than the stored format has entries, 'header' walks past its end - verify overview_fmt is always in sync with the server */ header = nntp_data->nserv->overview_fmt; while (field) { char *b = field; if (*header) { if (strstr(header, ":full") == NULL && fputs(header, fp) == EOF) { mutt_file_fclose(&fp); return -1; } header = strchr(header, '\0') + 1; } field = strchr(field, '\t'); if (field) *field++ = '\0'; if (fputs(b, fp) == EOF || fputc('\n', fp) == EOF) { mutt_file_fclose(&fp); return -1; } } rewind(fp); /* allocate memory for headers */ if (ctx->msgcount >= ctx->hdrmax) mx_alloc_memory(ctx); /* parse header */ hdr = ctx->hdrs[ctx->msgcount] = mutt_header_new(); hdr->env = mutt_rfc822_read_header(fp, hdr, 0, 0); hdr->env->newsgroups = mutt_str_strdup(nntp_data->group); hdr->received = hdr->date_sent; mutt_file_fclose(&fp); #ifdef USE_HCACHE if (fc->hc) { char buf[16]; /* try to replace with header from cache */ snprintf(buf, sizeof(buf), "%u", anum); void *hdata = mutt_hcache_fetch(fc->hc, buf, strlen(buf)); if (hdata) { mutt_debug(2, "mutt_hcache_fetch %s\n", buf); mutt_header_free(&hdr); ctx->hdrs[ctx->msgcount] = hdr = mutt_hcache_restore(hdata); mutt_hcache_free(fc->hc, &hdata); hdr->data = 0; hdr->read = false; hdr->old = false; /* skip header marked as deleted in cache */ if (hdr->deleted && !fc->restore) { if (nntp_data->bcache) { mutt_debug(2, "mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } save = false; } } /* not cached yet, store header */ else { mutt_debug(2, "mutt_hcache_store %s\n", buf); mutt_hcache_store(fc->hc, buf, strlen(buf), hdr, 0); } } #endif if (save) { hdr->index = ctx->msgcount++; hdr->read = false; hdr->old = false; hdr->deleted = false; hdr->data = mutt_mem_calloc(1, 
sizeof(struct NntpHeaderData)); NHDR(hdr)->article_num = anum; if (fc->restore) hdr->changed = true; else { nntp_article_status(ctx, hdr, NULL, anum); if (!hdr->read) nntp_parse_xref(ctx, hdr); } if (anum > nntp_data->last_loaded) nntp_data->last_loaded = anum; } else mutt_header_free(&hdr); /* progress */ if (!ctx->quiet) mutt_progress_update(&fc->progress, anum - fc->first + 1, -1); return 0; } /** * nntp_fetch_headers - Fetch headers * @param ctx Mailbox * @param hc Header cache * @param first Number of first header to fetch * @param last Number of last header to fetch * @param restore Restore message listed as deleted * @retval 0 Success * @retval -1 Failure */ static int nntp_fetch_headers(struct Context *ctx, void *hc, anum_t first, anum_t last, int restore) { struct NntpData *nntp_data = ctx->data; struct FetchCtx fc; struct Header *hdr = NULL; char buf[HUGE_STRING]; int rc = 0; int oldmsgcount = ctx->msgcount; anum_t current; anum_t first_over = first; #ifdef USE_HCACHE void *hdata = NULL; #endif /* if empty group or nothing to do */ if (!last || first > last) return 0; /* init fetch context */ fc.ctx = ctx; fc.first = first; fc.last = last; fc.restore = restore; /* assumes first <= last (checked above); one flag byte per article in range */ fc.messages = mutt_mem_calloc(last - first + 1, sizeof(unsigned char)); #ifdef USE_HCACHE fc.hc = hc; #endif /* fetch list of articles */ if (NntpListgroup && nntp_data->nserv->hasLISTGROUP && !nntp_data->deleted) { if (!ctx->quiet) mutt_message(_("Fetching list of articles...")); if (nntp_data->nserv->hasLISTGROUPrange) snprintf(buf, sizeof(buf), "LISTGROUP %s %u-%u\r\n", nntp_data->group, first, last); else snprintf(buf, sizeof(buf), "LISTGROUP %s\r\n", nntp_data->group); rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), NULL, fetch_numbers, &fc); if (rc > 0) { mutt_error("LISTGROUP: %s", buf); } if (rc == 0) { for (current = first; current <= last && rc == 0; current++) { if (fc.messages[current - first]) continue; snprintf(buf, sizeof(buf), "%u", current); if (nntp_data->bcache) { 
mutt_debug(2, "#1 mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } #ifdef USE_HCACHE if (fc.hc) { mutt_debug(2, "mutt_hcache_delete %s\n", buf); mutt_hcache_delete(fc.hc, buf, strlen(buf)); } #endif } } } else { for (current = first; current <= last; current++) fc.messages[current - first] = 1; } /* fetching header from cache or server, or fallback to fetch overview */ if (!ctx->quiet) { mutt_progress_init(&fc.progress, _("Fetching message headers..."), MUTT_PROGRESS_MSG, ReadInc, last - first + 1); } /* main loop: prefer the header cache, then HEAD per article; break out to OVER/XOVER when available */ for (current = first; current <= last && rc == 0; current++) { if (!ctx->quiet) mutt_progress_update(&fc.progress, current - first + 1, -1); #ifdef USE_HCACHE snprintf(buf, sizeof(buf), "%u", current); #endif /* delete header from cache that does not exist on server */ if (!fc.messages[current - first]) continue; /* allocate memory for headers */ if (ctx->msgcount >= ctx->hdrmax) mx_alloc_memory(ctx); #ifdef USE_HCACHE /* try to fetch header from cache */ hdata = mutt_hcache_fetch(fc.hc, buf, strlen(buf)); if (hdata) { mutt_debug(2, "mutt_hcache_fetch %s\n", buf); ctx->hdrs[ctx->msgcount] = hdr = mutt_hcache_restore(hdata); mutt_hcache_free(fc.hc, &hdata); hdr->data = 0; /* skip header marked as deleted in cache */ if (hdr->deleted && !restore) { mutt_header_free(&hdr); if (nntp_data->bcache) { mutt_debug(2, "#2 mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } continue; } hdr->read = false; hdr->old = false; } else #endif /* don't try to fetch header from removed newsgroup */ if (nntp_data->deleted) continue; /* fallback to fetch overview */ else if (nntp_data->nserv->hasOVER || nntp_data->nserv->hasXOVER) { if (NntpListgroup && nntp_data->nserv->hasLISTGROUP) break; else continue; } /* fetch header from server */ else { FILE *fp = mutt_file_mkstemp(); if (!fp) { mutt_perror("mutt_file_mkstemp() failed!"); rc = -1; break; } snprintf(buf, sizeof(buf), "HEAD %u\r\n", current); rc = nntp_fetch_lines(nntp_data, buf, 
sizeof(buf), NULL, fetch_tempfile, fp); if (rc) { mutt_file_fclose(&fp); if (rc < 0) break; /* invalid response */ if (mutt_str_strncmp("423", buf, 3) != 0) { mutt_error("HEAD: %s", buf); break; } /* no such article */ if (nntp_data->bcache) { snprintf(buf, sizeof(buf), "%u", current); mutt_debug(2, "#3 mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } rc = 0; continue; } /* parse header */ hdr = ctx->hdrs[ctx->msgcount] = mutt_header_new(); hdr->env = mutt_rfc822_read_header(fp, hdr, 0, 0); hdr->received = hdr->date_sent; mutt_file_fclose(&fp); } /* save header in context */ hdr->index = ctx->msgcount++; hdr->read = false; hdr->old = false; hdr->deleted = false; hdr->data = mutt_mem_calloc(1, sizeof(struct NntpHeaderData)); NHDR(hdr)->article_num = current; if (restore) hdr->changed = true; else { nntp_article_status(ctx, hdr, NULL, NHDR(hdr)->article_num); if (!hdr->read) nntp_parse_xref(ctx, hdr); } if (current > nntp_data->last_loaded) nntp_data->last_loaded = current; first_over = current + 1; } /* resume the overview fetch after the last header fetched individually */ if (!NntpListgroup || !nntp_data->nserv->hasLISTGROUP) current = first_over; /* fetch overview information */ if (current <= last && rc == 0 && !nntp_data->deleted) { char *cmd = nntp_data->nserv->hasOVER ? 
"OVER" : "XOVER"; snprintf(buf, sizeof(buf), "%s %u-%u\r\n", cmd, current, last); rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), NULL, parse_overview_line, &fc); if (rc > 0) { mutt_error("%s: %s", cmd, buf); } } if (ctx->msgcount > oldmsgcount) mx_update_context(ctx, ctx->msgcount - oldmsgcount); FREE(&fc.messages); if (rc != 0) return -1; mutt_clear_error(); return 0; } /** * nntp_mbox_open - Implements MxOps::mbox_open() */ static int nntp_mbox_open(struct Context *ctx) { struct NntpServer *nserv = NULL; struct NntpData *nntp_data = NULL; char buf[HUGE_STRING]; char server[LONG_STRING]; char *group = NULL; int rc; void *hc = NULL; anum_t first, last, count = 0; struct Url url; mutt_str_strfcpy(buf, ctx->path, sizeof(buf)); if (url_parse(&url, buf) < 0 || !url.host || !url.path || !(url.scheme == U_NNTP || url.scheme == U_NNTPS)) { url_free(&url); mutt_error(_("%s is an invalid newsgroup specification!"), ctx->path); return -1; } /* NOTE(review): 'group' points into the parsed URL and is used after url_free(&url) below - verify url_free() does not release the memory 'path' points at (url_parse in this codebase appears to parse in place - TODO confirm) */ group = url.path; url.path = strchr(url.path, '\0'); url_tostring(&url, server, sizeof(server), 0); nserv = nntp_select_server(server, true); url_free(&url); if (!nserv) return -1; CurrentNewsSrv = nserv; /* find news group data structure */ nntp_data = mutt_hash_find(nserv->groups_hash, group); if (!nntp_data) { nntp_newsrc_close(nserv); mutt_error(_("Newsgroup %s not found on the server."), group); return -1; } mutt_bit_unset(ctx->rights, MUTT_ACL_INSERT); if (!nntp_data->newsrc_ent && !nntp_data->subscribed && !SaveUnsubscribed) ctx->readonly = true; /* select newsgroup */ mutt_message(_("Selecting %s..."), group); buf[0] = '\0'; if (nntp_query(nntp_data, buf, sizeof(buf)) < 0) { nntp_newsrc_close(nserv); return -1; } /* newsgroup not found, remove it */ if (mutt_str_strncmp("411", buf, 3) == 0) { mutt_error(_("Newsgroup %s has been removed from the server."), nntp_data->group); if (!nntp_data->deleted) { nntp_data->deleted = true; nntp_active_save_cache(nserv); } if (nntp_data->newsrc_ent && !nntp_data->subscribed && 
!SaveUnsubscribed) { FREE(&nntp_data->newsrc_ent); nntp_data->newsrc_len = 0; nntp_delete_group_cache(nntp_data); nntp_newsrc_update(nserv); } } /* parse newsgroup info */ else { /* NOTE(review): first/last/count come straight from the server and are not sanity-checked (e.g. first <= last) */ if (sscanf(buf, "211 " ANUM " " ANUM " " ANUM, &count, &first, &last) != 3) { nntp_newsrc_close(nserv); mutt_error("GROUP: %s", buf); return -1; } nntp_data->first_message = first; nntp_data->last_message = last; nntp_data->deleted = false; /* get description if empty */ if (NntpLoadDescription && !nntp_data->desc) { if (get_description(nntp_data, NULL, NULL) < 0) { nntp_newsrc_close(nserv); return -1; } if (nntp_data->desc) nntp_active_save_cache(nserv); } } time(&nserv->check_time); ctx->data = nntp_data; if (!nntp_data->bcache && (nntp_data->newsrc_ent || nntp_data->subscribed || SaveUnsubscribed)) nntp_data->bcache = mutt_bcache_open(&nserv->conn->account, nntp_data->group); /* strip off extra articles if adding context is greater than $nntp_context */ first = nntp_data->first_message; if (NntpContext && nntp_data->last_message - first + 1 > NntpContext) first = nntp_data->last_message - NntpContext + 1; nntp_data->last_loaded = first ? 
first - 1 : 0; count = nntp_data->first_message; nntp_data->first_message = first; nntp_bcache_update(nntp_data); nntp_data->first_message = count; #ifdef USE_HCACHE hc = nntp_hcache_open(nntp_data); nntp_hcache_update(nntp_data, hc); #endif if (!hc) { mutt_bit_unset(ctx->rights, MUTT_ACL_WRITE); mutt_bit_unset(ctx->rights, MUTT_ACL_DELETE); } nntp_newsrc_close(nserv); rc = nntp_fetch_headers(ctx, hc, first, nntp_data->last_message, 0); #ifdef USE_HCACHE mutt_hcache_close(hc); #endif if (rc < 0) return -1; nntp_data->last_loaded = nntp_data->last_message; nserv->newsrc_modified = false; return 0; } /** * nntp_msg_open - Implements MxOps::msg_open() */ static int nntp_msg_open(struct Context *ctx, struct Message *msg, int msgno) { struct NntpData *nntp_data = ctx->data; struct Header *hdr = ctx->hdrs[msgno]; char article[16]; /* try to get article from cache */ struct NntpAcache *acache = &nntp_data->acache[hdr->index % NNTP_ACACHE_LEN]; if (acache->path) { if (acache->index == hdr->index) { msg->fp = mutt_file_fopen(acache->path, "r"); if (msg->fp) return 0; } /* clear previous entry */ else { unlink(acache->path); FREE(&acache->path); } } /* FIX: article_num is anum_t (unsigned, scanned with ANUM/"%u" elsewhere); "%d" would print large article numbers as negative and corrupt cache keys */ snprintf(article, sizeof(article), "%u", NHDR(hdr)->article_num); msg->fp = mutt_bcache_get(nntp_data->bcache, article); if (msg->fp) { if (NHDR(hdr)->parsed) return 0; } else { char buf[PATH_MAX]; /* don't try to fetch article from removed newsgroup */ if (nntp_data->deleted) return -1; /* create new cache file */ const char *fetch_msg = _("Fetching message..."); mutt_message(fetch_msg); msg->fp = mutt_bcache_put(nntp_data->bcache, article); if (!msg->fp) { mutt_mktemp(buf, sizeof(buf)); acache->path = mutt_str_strdup(buf); acache->index = hdr->index; msg->fp = mutt_file_fopen(acache->path, "w+"); if (!msg->fp) { mutt_perror(acache->path); unlink(acache->path); FREE(&acache->path); return -1; } } /* fetch message to cache file */ snprintf(buf, sizeof(buf), "ARTICLE %s\r\n", NHDR(hdr)->article_num ? 
article : hdr->env->message_id); const int rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), fetch_msg, fetch_tempfile, msg->fp); if (rc) { mutt_file_fclose(&msg->fp); if (acache->path) { unlink(acache->path); FREE(&acache->path); } if (rc > 0) { if (mutt_str_strncmp(NHDR(hdr)->article_num ? "423" : "430", buf, 3) == 0) { /* FIX: the argument is a string either way (article buffer or message-id); "%d" with a char* argument is undefined behavior */ mutt_error(_("Article %s not found on the server."), NHDR(hdr)->article_num ? article : hdr->env->message_id); } else mutt_error("ARTICLE: %s", buf); } return -1; } if (!acache->path) mutt_bcache_commit(nntp_data->bcache, article); } /* replace envelope with new one * hash elements must be updated because pointers will be changed */ if (ctx->id_hash && hdr->env->message_id) mutt_hash_delete(ctx->id_hash, hdr->env->message_id, hdr); if (ctx->subj_hash && hdr->env->real_subj) mutt_hash_delete(ctx->subj_hash, hdr->env->real_subj, hdr); mutt_env_free(&hdr->env); hdr->env = mutt_rfc822_read_header(msg->fp, hdr, 0, 0); if (ctx->id_hash && hdr->env->message_id) mutt_hash_insert(ctx->id_hash, hdr->env->message_id, hdr); if (ctx->subj_hash && hdr->env->real_subj) mutt_hash_insert(ctx->subj_hash, hdr->env->real_subj, hdr); /* fix content length */ fseek(msg->fp, 0, SEEK_END); hdr->content->length = ftell(msg->fp) - hdr->content->offset; /* this is called in neomutt before the open which fetches the message, * which is probably wrong, but we just call it again here to handle * the problem instead of fixing it */ NHDR(hdr)->parsed = true; mutt_parse_mime_message(ctx, hdr); /* these would normally be updated in mx_update_context(), but the * full headers aren't parsed with overview, so the information wasn't * available then */ if (WithCrypto) hdr->security = crypt_query(hdr->content); rewind(msg->fp); mutt_clear_error(); return 0; } /** * nntp_msg_close - Implements MxOps::msg_close() * * @note May also return EOF Failure, see errno */ static int nntp_msg_close(struct Context *ctx, struct Message *msg) { return mutt_file_fclose(&msg->fp); } /** * nntp_post - 
Post article * @param msg Message to post * @retval 0 Success * @retval -1 Failure */ int nntp_post(const char *msg) { struct NntpData *nntp_data, nntp_tmp; char buf[LONG_STRING]; if (Context && Context->magic == MUTT_NNTP) nntp_data = Context->data; else { CurrentNewsSrv = nntp_select_server(NewsServer, false); if (!CurrentNewsSrv) return -1; nntp_data = &nntp_tmp; nntp_data->nserv = CurrentNewsSrv; nntp_data->group = NULL; } FILE *fp = mutt_file_fopen(msg, "r"); if (!fp) { mutt_perror(msg); return -1; } mutt_str_strfcpy(buf, "POST\r\n", sizeof(buf)); if (nntp_query(nntp_data, buf, sizeof(buf)) < 0) { mutt_file_fclose(&fp); return -1; } if (buf[0] != '3') { mutt_error(_("Can't post article: %s"), buf); mutt_file_fclose(&fp); return -1; } /* keep one spare byte at buf[0] for RFC 3977 dot-stuffing: lines starting with '.' are sent with the '.' doubled */ buf[0] = '.'; buf[1] = '\0'; while (fgets(buf + 1, sizeof(buf) - 2, fp)) { size_t len = strlen(buf); if (buf[len - 1] == '\n') { buf[len - 1] = '\r'; buf[len] = '\n'; len++; buf[len] = '\0'; } if (mutt_socket_send_d(nntp_data->nserv->conn, buf[1] == '.' ? buf : buf + 1, MUTT_SOCK_LOG_HDR) < 0) { mutt_file_fclose(&fp); return nntp_connect_error(nntp_data->nserv); } } mutt_file_fclose(&fp); /* flush a final unterminated line, then send the terminating "." line */ if ((buf[strlen(buf) - 1] != '\n' && mutt_socket_send_d(nntp_data->nserv->conn, "\r\n", MUTT_SOCK_LOG_HDR) < 0) || mutt_socket_send_d(nntp_data->nserv->conn, ".\r\n", MUTT_SOCK_LOG_HDR) < 0 || mutt_socket_readln(buf, sizeof(buf), nntp_data->nserv->conn) < 0) { return nntp_connect_error(nntp_data->nserv); } if (buf[0] != '2') { mutt_error(_("Can't post article: %s"), buf); return -1; } return 0; } /** * nntp_group_poll - Check newsgroup for new articles * @param nntp_data NNTP server data * @param update_stat Update the stats? 
* @retval 1 New articles found * @retval 0 No change * @retval -1 Lost connection */ static int nntp_group_poll(struct NntpData *nntp_data, int update_stat) { char buf[LONG_STRING] = ""; anum_t count, first, last; /* use GROUP command to poll newsgroup */ if (nntp_query(nntp_data, buf, sizeof(buf)) < 0) return -1; if (sscanf(buf, "211 " ANUM " " ANUM " " ANUM, &count, &first, &last) != 3) return 0; if (first == nntp_data->first_message && last == nntp_data->last_message) return 0; /* articles have been renumbered */ if (last < nntp_data->last_message) { nntp_data->last_cached = 0; if (nntp_data->newsrc_len) { /* shrink .newsrc state to a single placeholder entry (first=1, last=0, i.e. nothing read) */ mutt_mem_realloc(&nntp_data->newsrc_ent, sizeof(struct NewsrcEntry)); nntp_data->newsrc_len = 1; nntp_data->newsrc_ent[0].first = 1; nntp_data->newsrc_ent[0].last = 0; } } nntp_data->first_message = first; nntp_data->last_message = last; if (!update_stat) return 1; /* update counters */ else if (!last || (!nntp_data->newsrc_ent && !nntp_data->last_cached)) nntp_data->unread = count; else nntp_group_unread_stat(nntp_data); return 1; } /** * check_mailbox - Check current newsgroup for new articles * @param ctx Mailbox * @retval #MUTT_REOPENED Articles have been renumbered or removed from server * @retval #MUTT_NEW_MAIL New articles found * @retval 0 No change * @retval -1 Lost connection * * Leave newsrc locked */ static int check_mailbox(struct Context *ctx) { struct NntpData *nntp_data = ctx->data; struct NntpServer *nserv = nntp_data->nserv; time_t now = time(NULL); int rc, ret = 0; void *hc = NULL; if (nserv->check_time + NntpPoll > now) return 0; mutt_message(_("Checking for new messages...")); if (nntp_newsrc_parse(nserv) < 0) return -1; nserv->check_time = now; rc = nntp_group_poll(nntp_data, 0); if (rc < 0) { nntp_newsrc_close(nserv); return -1; } if (rc) nntp_active_save_cache(nserv); /* articles have been renumbered, remove all headers */ if (nntp_data->last_message < nntp_data->last_loaded) { for (int i = 0; i < ctx->msgcount; i++) 
mutt_header_free(&ctx->hdrs[i]); ctx->msgcount = 0; ctx->tagged = 0; /* NOTE(review): this inner check repeats the enclosing condition and is always true here */ if (nntp_data->last_message < nntp_data->last_loaded) { nntp_data->last_loaded = nntp_data->first_message - 1; if (NntpContext && nntp_data->last_message - nntp_data->last_loaded > NntpContext) nntp_data->last_loaded = nntp_data->last_message - NntpContext; } ret = MUTT_REOPENED; } /* .newsrc has been externally modified */ if (nserv->newsrc_modified) { #ifdef USE_HCACHE unsigned char *messages = NULL; char buf[16]; void *hdata = NULL; struct Header *hdr = NULL; anum_t first = nntp_data->first_message; if (NntpContext && nntp_data->last_message - first + 1 > NntpContext) first = nntp_data->last_message - NntpContext + 1; messages = mutt_mem_calloc(nntp_data->last_loaded - first + 1, sizeof(unsigned char)); hc = nntp_hcache_open(nntp_data); nntp_hcache_update(nntp_data, hc); #endif /* update flags according to .newsrc */ int j = 0; anum_t anum; for (int i = 0; i < ctx->msgcount; i++) { bool flagged = false; anum = NHDR(ctx->hdrs[i])->article_num; #ifdef USE_HCACHE /* check hcache for flagged and deleted flags */ if (hc) { if (anum >= first && anum <= nntp_data->last_loaded) messages[anum - first] = 1; snprintf(buf, sizeof(buf), "%u", anum); hdata = mutt_hcache_fetch(hc, buf, strlen(buf)); if (hdata) { bool deleted; mutt_debug(2, "#1 mutt_hcache_fetch %s\n", buf); hdr = mutt_hcache_restore(hdata); mutt_hcache_free(hc, &hdata); hdr->data = 0; deleted = hdr->deleted; flagged = hdr->flagged; mutt_header_free(&hdr); /* header marked as deleted, removing from context */ if (deleted) { mutt_set_flag(ctx, ctx->hdrs[i], MUTT_TAG, 0); mutt_header_free(&ctx->hdrs[i]); continue; } } } #endif if (!ctx->hdrs[i]->changed) { ctx->hdrs[i]->flagged = flagged; ctx->hdrs[i]->read = false; ctx->hdrs[i]->old = false; nntp_article_status(ctx, ctx->hdrs[i], NULL, anum); if (!ctx->hdrs[i]->read) nntp_parse_xref(ctx, ctx->hdrs[i]); } ctx->hdrs[j++] = ctx->hdrs[i]; } #ifdef USE_HCACHE ctx->msgcount = j; /* restore headers 
without "deleted" flag */ for (anum = first; anum <= nntp_data->last_loaded; anum++) { if (messages[anum - first]) continue; snprintf(buf, sizeof(buf), "%u", anum); hdata = mutt_hcache_fetch(hc, buf, strlen(buf)); if (hdata) { mutt_debug(2, "#2 mutt_hcache_fetch %s\n", buf); if (ctx->msgcount >= ctx->hdrmax) mx_alloc_memory(ctx); ctx->hdrs[ctx->msgcount] = hdr = mutt_hcache_restore(hdata); mutt_hcache_free(hc, &hdata); hdr->data = 0; if (hdr->deleted) { mutt_header_free(&hdr); if (nntp_data->bcache) { mutt_debug(2, "mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } continue; } ctx->msgcount++; hdr->read = false; hdr->old = false; hdr->data = mutt_mem_calloc(1, sizeof(struct NntpHeaderData)); NHDR(hdr)->article_num = anum; nntp_article_status(ctx, hdr, NULL, anum); if (!hdr->read) nntp_parse_xref(ctx, hdr); } } FREE(&messages); #endif nserv->newsrc_modified = false; ret = MUTT_REOPENED; } /* some headers were removed, context must be updated */ if (ret == MUTT_REOPENED) { if (ctx->subj_hash) mutt_hash_destroy(&ctx->subj_hash); if (ctx->id_hash) mutt_hash_destroy(&ctx->id_hash); mutt_clear_threads(ctx); ctx->vcount = 0; ctx->deleted = 0; ctx->new = 0; ctx->unread = 0; ctx->flagged = 0; ctx->changed = false; ctx->id_hash = NULL; ctx->subj_hash = NULL; mx_update_context(ctx, ctx->msgcount); } /* fetch headers of new articles */ if (nntp_data->last_message > nntp_data->last_loaded) { int oldmsgcount = ctx->msgcount; bool quiet = ctx->quiet; ctx->quiet = true; #ifdef USE_HCACHE if (!hc) { hc = nntp_hcache_open(nntp_data); nntp_hcache_update(nntp_data, hc); } #endif rc = nntp_fetch_headers(ctx, hc, nntp_data->last_loaded + 1, nntp_data->last_message, 0); ctx->quiet = quiet; if (rc >= 0) nntp_data->last_loaded = nntp_data->last_message; if (ret == 0 && ctx->msgcount > oldmsgcount) ret = MUTT_NEW_MAIL; } #ifdef USE_HCACHE mutt_hcache_close(hc); #endif /* newsrc is closed here only when something changed; when ret == 0 the caller (e.g. nntp_mbox_check) closes it instead */ if (ret) nntp_newsrc_close(nserv); mutt_clear_error(); return ret; } /** * nntp_mbox_check - 
Implements MxOps::mbox_check() * @param ctx Mailbox * @param index_hint Current message (UNUSED) * @retval #MUTT_REOPENED Articles have been renumbered or removed from server * @retval #MUTT_NEW_MAIL New articles found * @retval 0 No change * @retval -1 Lost connection */ static int nntp_mbox_check(struct Context *ctx, int *index_hint) { int ret = check_mailbox(ctx); if (ret == 0) { struct NntpData *nntp_data = ctx->data; struct NntpServer *nserv = nntp_data->nserv; nntp_newsrc_close(nserv); } return ret; } /** * nntp_mbox_sync - Implements MxOps::mbox_sync() * * @note May also return values from check_mailbox() */ static int nntp_mbox_sync(struct Context *ctx, int *index_hint) { struct NntpData *nntp_data = ctx->data; int rc; #ifdef USE_HCACHE header_cache_t *hc = NULL; #endif /* check for new articles */ nntp_data->nserv->check_time = 0; rc = check_mailbox(ctx); if (rc) return rc; #ifdef USE_HCACHE nntp_data->last_cached = 0; hc = nntp_hcache_open(nntp_data); #endif for (int i = 0; i < ctx->msgcount; i++) { struct Header *hdr = ctx->hdrs[i]; char buf[16]; /* FIX: article_num is anum_t (unsigned, printed with "%u"/ANUM everywhere else, e.g. the hcache keys in check_mailbox); "%d" would render large article numbers negative and produce mismatched cache keys */ snprintf(buf, sizeof(buf), "%u", NHDR(hdr)->article_num); if (nntp_data->bcache && hdr->deleted) { mutt_debug(2, "mutt_bcache_del %s\n", buf); mutt_bcache_del(nntp_data->bcache, buf); } #ifdef USE_HCACHE if (hc && (hdr->changed || hdr->deleted)) { if (hdr->deleted && !hdr->read) nntp_data->unread--; mutt_debug(2, "mutt_hcache_store %s\n", buf); mutt_hcache_store(hc, buf, strlen(buf), hdr, 0); } #endif } #ifdef USE_HCACHE if (hc) { mutt_hcache_close(hc); nntp_data->last_cached = nntp_data->last_loaded; } #endif /* save .newsrc entries */ nntp_newsrc_gen_entries(ctx); nntp_newsrc_update(nntp_data->nserv); nntp_newsrc_close(nntp_data->nserv); return 0; } /** * nntp_mbox_close - Implements MxOps::mbox_close() * @retval 0 Always */ static int nntp_mbox_close(struct Context *ctx) { struct NntpData *nntp_data = ctx->data, *nntp_tmp = NULL; if (!nntp_data) return 0; nntp_data->unread = ctx->unread; 
nntp_acache_free(nntp_data); if (!nntp_data->nserv || !nntp_data->nserv->groups_hash || !nntp_data->group) return 0; nntp_tmp = mutt_hash_find(nntp_data->nserv->groups_hash, nntp_data->group); if (nntp_tmp == NULL || nntp_tmp != nntp_data) nntp_data_free(nntp_data); return 0; } /** * nntp_date - Get date and time from server * @param nserv NNTP server * @param now Server time * @retval 0 Success * @retval -1 Failure */ static int nntp_date(struct NntpServer *nserv, time_t *now) { if (nserv->hasDATE) { struct NntpData nntp_data; char buf[LONG_STRING]; struct tm tm; memset(&tm, 0, sizeof(tm)); nntp_data.nserv = nserv; nntp_data.group = NULL; mutt_str_strfcpy(buf, "DATE\r\n", sizeof(buf)); if (nntp_query(&nntp_data, buf, sizeof(buf)) < 0) return -1; if (sscanf(buf, "111 %4d%2d%2d%2d%2d%2d%*s", &tm.tm_year, &tm.tm_mon, &tm.tm_mday, &tm.tm_hour, &tm.tm_min, &tm.tm_sec) == 6) { tm.tm_year -= 1900; tm.tm_mon--; *now = timegm(&tm); if (*now >= 0) { /* FIX: time_t width is implementation-defined; cast to match %lu instead of passing time_t directly (undefined behavior where time_t is not unsigned long) */ mutt_debug(1, "server time is %lu\n", (unsigned long) *now); return 0; } } } time(now); return 0; } /** * nntp_active_fetch - Fetch list of all newsgroups from server * @param nserv NNTP server * @param new Mark the groups as new * @retval 0 Success * @retval -1 Failure */ int nntp_active_fetch(struct NntpServer *nserv, bool new) { struct NntpData nntp_data; char msg[STRING]; char buf[LONG_STRING]; unsigned int i; int rc; snprintf(msg, sizeof(msg), _("Loading list of groups from server %s..."), nserv->conn->account.host); /* FIX: msg embeds the server host name; never pass it as the format string (format-string vulnerability if the host contains '%') */ mutt_message("%s", msg); if (nntp_date(nserv, &nserv->newgroups_time) < 0) return -1; nntp_data.nserv = nserv; nntp_data.group = NULL; i = nserv->groups_num; mutt_str_strfcpy(buf, "LIST\r\n", sizeof(buf)); rc = nntp_fetch_lines(&nntp_data, buf, sizeof(buf), msg, nntp_add_group, nserv); if (rc) { if (rc > 0) { mutt_error("LIST: %s", buf); } return -1; } if (new) { for (; i < nserv->groups_num; i++) { struct NntpData *data = nserv->groups_list[i]; data->new = true; } } for (i = 0; i < nserv->groups_num; i++) { struct NntpData *data = 
nserv->groups_list[i]; if (data && data->deleted && !data->newsrc_ent) { nntp_delete_group_cache(data); mutt_hash_delete(nserv->groups_hash, data->group, NULL); nserv->groups_list[i] = NULL; } } if (NntpLoadDescription) rc = get_description(&nntp_data, "*", _("Loading descriptions...")); nntp_active_save_cache(nserv); if (rc < 0) return -1; mutt_clear_error(); return 0; } /** * nntp_check_new_groups - Check for new groups/articles in subscribed groups * @param nserv NNTP server * @retval 1 New groups found * @retval 0 No new groups * @retval -1 Error */ int nntp_check_new_groups(struct NntpServer *nserv) { struct NntpData nntp_data; time_t now; struct tm *tm = NULL; char buf[LONG_STRING]; char *msg = _("Checking for new newsgroups..."); unsigned int i; int rc, update_active = false; if (!nserv || !nserv->newgroups_time) return -1; /* check subscribed newsgroups for new articles */ if (ShowNewNews) { mutt_message(_("Checking for new messages...")); for (i = 0; i < nserv->groups_num; i++) { struct NntpData *data = nserv->groups_list[i]; if (data && data->subscribed) { rc = nntp_group_poll(data, 1); if (rc < 0) return -1; if (rc > 0) update_active = true; } } /* select current newsgroup */ if (Context && Context->magic == MUTT_NNTP) { buf[0] = '\0'; if (nntp_query((struct NntpData *) Context->data, buf, sizeof(buf)) < 0) return -1; } } else if (nserv->newgroups_time) return 0; /* get list of new groups */ mutt_message(msg); if (nntp_date(nserv, &now) < 0) return -1; nntp_data.nserv = nserv; if (Context && Context->magic == MUTT_NNTP) nntp_data.group = ((struct NntpData *) Context->data)->group; else nntp_data.group = NULL; i = nserv->groups_num; tm = gmtime(&nserv->newgroups_time); snprintf(buf, sizeof(buf), "NEWGROUPS %02d%02d%02d %02d%02d%02d GMT\r\n", tm->tm_year % 100, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); rc = nntp_fetch_lines(&nntp_data, buf, sizeof(buf), msg, nntp_add_group, nserv); if (rc) { if (rc > 0) { mutt_error("NEWGROUPS: 
%s", buf); } return -1; } /* new groups found */ rc = 0; if (nserv->groups_num != i) { int groups_num = i; nserv->newgroups_time = now; for (; i < nserv->groups_num; i++) { struct NntpData *data = nserv->groups_list[i]; data->new = true; } /* loading descriptions */ if (NntpLoadDescription) { unsigned int count = 0; struct Progress progress; mutt_progress_init(&progress, _("Loading descriptions..."), MUTT_PROGRESS_MSG, ReadInc, nserv->groups_num - i); for (i = groups_num; i < nserv->groups_num; i++) { struct NntpData *data = nserv->groups_list[i]; if (get_description(data, NULL, NULL) < 0) return -1; mutt_progress_update(&progress, ++count, -1); } } update_active = true; rc = 1; } if (update_active) nntp_active_save_cache(nserv); mutt_clear_error(); return rc; } /** * nntp_check_msgid - Fetch article by Message-ID * @param ctx Mailbox * @param msgid Message ID * @retval 0 Success * @retval 1 No such article * @retval -1 Error */ int nntp_check_msgid(struct Context *ctx, const char *msgid) { struct NntpData *nntp_data = ctx->data; char buf[LONG_STRING]; FILE *fp = mutt_file_mkstemp(); if (!fp) { mutt_perror("mutt_file_mkstemp() failed!"); return -1; } snprintf(buf, sizeof(buf), "HEAD %s\r\n", msgid); int rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), NULL, fetch_tempfile, fp); if (rc) { mutt_file_fclose(&fp); if (rc < 0) return -1; if (mutt_str_strncmp("430", buf, 3) == 0) return 1; mutt_error("HEAD: %s", buf); return -1; } /* parse header */ if (ctx->msgcount == ctx->hdrmax) mx_alloc_memory(ctx); struct Header *hdr = ctx->hdrs[ctx->msgcount] = mutt_header_new(); hdr->data = mutt_mem_calloc(1, sizeof(struct NntpHeaderData)); hdr->env = mutt_rfc822_read_header(fp, hdr, 0, 0); mutt_file_fclose(&fp); /* get article number */ if (hdr->env->xref) nntp_parse_xref(ctx, hdr); else { snprintf(buf, sizeof(buf), "STAT %s\r\n", msgid); if (nntp_query(nntp_data, buf, sizeof(buf)) < 0) { mutt_header_free(&hdr); return -1; } sscanf(buf + 4, ANUM, &NHDR(hdr)->article_num); } /* 
reset flags */ hdr->read = false; hdr->old = false; hdr->deleted = false; hdr->changed = true; hdr->received = hdr->date_sent; hdr->index = ctx->msgcount++; mx_update_context(ctx, 1); return 0; } /** * struct ChildCtx - Keep track of the children of an article */ struct ChildCtx { struct Context *ctx; unsigned int num; unsigned int max; anum_t *child; }; /** * fetch_children - Parse XPAT line * @param line String to parse * @param data ChildCtx * @retval 0 Always */ static int fetch_children(char *line, void *data) { struct ChildCtx *cc = data; anum_t anum; if (!line || sscanf(line, ANUM, &anum) != 1) return 0; for (unsigned int i = 0; i < cc->ctx->msgcount; i++) if (NHDR(cc->ctx->hdrs[i])->article_num == anum) return 0; if (cc->num >= cc->max) { cc->max *= 2; mutt_mem_realloc(&cc->child, sizeof(anum_t) * cc->max); } cc->child[cc->num++] = anum; return 0; } /** * nntp_check_children - Fetch children of article with the Message-ID * @param ctx Mailbox * @param msgid Message ID to find * @retval 0 Success * @retval -1 Failure */ int nntp_check_children(struct Context *ctx, const char *msgid) { struct NntpData *nntp_data = ctx->data; struct ChildCtx cc; char buf[STRING]; int rc; bool quiet; void *hc = NULL; if (!nntp_data || !nntp_data->nserv) return -1; if (nntp_data->first_message > nntp_data->last_loaded) return 0; /* init context */ cc.ctx = ctx; cc.num = 0; cc.max = 10; cc.child = mutt_mem_malloc(sizeof(anum_t) * cc.max); /* fetch numbers of child messages */ snprintf(buf, sizeof(buf), "XPAT References %u-%u *%s*\r\n", nntp_data->first_message, nntp_data->last_loaded, msgid); rc = nntp_fetch_lines(nntp_data, buf, sizeof(buf), NULL, fetch_children, &cc); if (rc) { FREE(&cc.child); if (rc > 0) { if (mutt_str_strncmp("500", buf, 3) != 0) mutt_error("XPAT: %s", buf); else { mutt_error(_("Unable to find child articles because server does not " "support XPAT command.")); } } return -1; } /* fetch all found messages */ quiet = ctx->quiet; ctx->quiet = true; #ifdef 
USE_HCACHE hc = nntp_hcache_open(nntp_data); #endif for (int i = 0; i < cc.num; i++) { rc = nntp_fetch_headers(ctx, hc, cc.child[i], cc.child[i], 1); if (rc < 0) break; } #ifdef USE_HCACHE mutt_hcache_close(hc); #endif ctx->quiet = quiet; FREE(&cc.child); return (rc < 0) ? -1 : 0; } // clang-format off /** * struct mx_nntp_ops - Mailbox callback functions for NNTP mailboxes */ struct MxOps mx_nntp_ops = { .mbox_open = nntp_mbox_open, .mbox_open_append = NULL, .mbox_check = nntp_mbox_check, .mbox_sync = nntp_mbox_sync, .mbox_close = nntp_mbox_close, .msg_open = nntp_msg_open, .msg_open_new = NULL, .msg_commit = NULL, .msg_close = nntp_msg_close, .tags_edit = NULL, .tags_commit = NULL, }; // clang-format on
./CrossVul/dataset_final_sorted/CWE-20/c/bad_254_0
crossvul-cpp_data_good_4685_1
/*-
 * Copyright (c) 2018 Grzegorz Antoniak (http://antoniak.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "archive_platform.h"
#include "archive_endian.h"

#ifdef HAVE_ERRNO_H
#include <errno.h>
#endif
#include <time.h>
#ifdef HAVE_ZLIB_H
#include <zlib.h> /* crc32 */
#endif
#ifdef HAVE_LIMITS_H
#include <limits.h>
#endif

#include "archive.h"
#ifndef HAVE_ZLIB_H
#include "archive_crc32.h"
#endif

#include "archive_entry.h"
#include "archive_entry_locale.h"
#include "archive_ppmd7_private.h"
#include "archive_entry_private.h"

#ifdef HAVE_BLAKE2_H
#include <blake2.h>
#else
#include "archive_blake2.h"
#endif

/*#define CHECK_CRC_ON_SOLID_SKIP*/
/*#define DONT_FAIL_ON_CRC_ERROR*/
/*#define DEBUG*/

#define rar5_min(a, b) (((a) > (b)) ? (b) : (a))
#define rar5_max(a, b) (((a) > (b)) ? (a) : (b))
#define rar5_countof(X) ((const ssize_t) (sizeof(X) / sizeof(*X)))

#if defined DEBUG
#define DEBUG_CODE if(1)
#define LOG(...) do { printf("rar5: " __VA_ARGS__); puts(""); } while(0)
#else
#define DEBUG_CODE if(0)
#endif

/* Real RAR5 magic number is:
 *
 * 0x52, 0x61, 0x72, 0x21, 0x1a, 0x07, 0x01, 0x00
 * "Rar!→•☺·\x00"
 *
 * It's stored in `rar5_signature` after XOR'ing it with 0xA1, because I don't
 * want to put this magic sequence in each binary that uses libarchive, so
 * applications that scan through the file for this marker won't trigger on
 * this "false" one.
 *
 * The array itself is decrypted in `rar5_init` function. */

static unsigned char rar5_signature[] = { 243, 192, 211, 128, 187, 166, 160, 161 };
static const ssize_t rar5_signature_size = sizeof(rar5_signature);
static const size_t g_unpack_window_size = 0x20000;

/* These could have been static const's, but they aren't, because of
 * Visual Studio. */
#define MAX_NAME_IN_CHARS 2048
#define MAX_NAME_IN_BYTES (4 * MAX_NAME_IN_CHARS)

/* Per-file (per-entry) state of the unpacker. */
struct file_header {
	ssize_t bytes_remaining;
	ssize_t unpacked_size;
	int64_t last_offset;         /* Used in sanity checks. */
	int64_t last_size;           /* Used in sanity checks. */

	uint8_t solid : 1;           /* Is this a solid stream? */
	uint8_t service : 1;         /* Is this file a service data? */
	uint8_t eof : 1;             /* Did we finish unpacking the file? */
	uint8_t dir : 1;             /* Is this file entry a directory? */

	/* Optional time fields. */
	uint64_t e_mtime;
	uint64_t e_ctime;
	uint64_t e_atime;
	uint32_t e_unix_ns;

	/* Optional hash fields. */
	uint32_t stored_crc32;
	uint32_t calculated_crc32;
	uint8_t blake2sp[32];
	blake2sp_state b2state;
	char has_blake2;

	/* Optional redir fields */
	uint64_t redir_type;
	uint64_t redir_flags;

	ssize_t solid_window_size; /* Used in file format check.
 */
};

/* Extra-area record types inside a file header. */
enum EXTRA {
	EX_CRYPT = 0x01,
	EX_HASH = 0x02,
	EX_HTIME = 0x03,
	EX_VERSION = 0x04,
	EX_REDIR = 0x05,
	EX_UOWNER = 0x06,
	EX_SUBDATA = 0x07
};

#define REDIR_SYMLINK_IS_DIR 1

enum REDIR_TYPE {
	REDIR_TYPE_NONE = 0,
	REDIR_TYPE_UNIXSYMLINK = 1,
	REDIR_TYPE_WINSYMLINK = 2,
	REDIR_TYPE_JUNCTION = 3,
	REDIR_TYPE_HARDLINK = 4,
	REDIR_TYPE_FILECOPY = 5,
};

#define OWNER_USER_NAME 0x01
#define OWNER_GROUP_NAME 0x02
#define OWNER_USER_UID 0x04
#define OWNER_GROUP_GID 0x08
#define OWNER_MAXNAMELEN 256

enum FILTER_TYPE {
	FILTER_DELTA = 0,   /* Generic pattern. */
	FILTER_E8 = 1,      /* Intel x86 code. */
	FILTER_E8E9 = 2,    /* Intel x86 code. */
	FILTER_ARM = 3,     /* ARM code. */
	FILTER_AUDIO = 4,   /* Audio filter, not used in RARv5. */
	FILTER_RGB = 5,     /* Color palette, not used in RARv5. */
	FILTER_ITANIUM = 6, /* Intel's Itanium, not used in RARv5. */
	FILTER_PPM = 7,     /* Predictive pattern matching, not used in
	                       RARv5. */
	FILTER_NONE = 8,
};

/* One decoded filter record, applied to a window-buffer range. */
struct filter_info {
	int type;
	int channels;
	int pos_r;

	int64_t block_start;
	ssize_t block_length;
	uint16_t width;
};

/* One entry of the "data ready" FIFO handed to the caller. */
struct data_ready {
	char used;
	const uint8_t* buf;
	size_t size;
	int64_t offset;
};

/* Fixed-capacity circular double-ended queue (capacity is a power of 2). */
struct cdeque {
	uint16_t beg_pos;
	uint16_t end_pos;
	uint16_t cap_mask;
	uint16_t size;
	size_t* arr;
};

/* Huffman decode table with a quick-lookup acceleration area. */
struct decode_table {
	uint32_t size;
	int32_t decode_len[16];
	uint32_t decode_pos[16];
	uint32_t quick_bits;
	uint8_t quick_len[1 << 10];
	uint16_t quick_num[1 << 10];
	uint16_t decode_num[306];
};

struct comp_state {
	/* Flag used to specify if unpacker needs to reinitialize the
	   uncompression context. */
	uint8_t initialized : 1;

	/* Flag used when applying filters. */
	uint8_t all_filters_applied : 1;

	/* Flag used to skip file context reinitialization, used when unpacker
	   is skipping through different multivolume archives. */
	uint8_t switch_multivolume : 1;

	/* Flag used to specify if unpacker has processed the whole data block
	   or just a part of it. */
	uint8_t block_parsing_finished : 1;

	signed int notused : 4;

	int flags;                   /* Uncompression flags. */
	int method;                  /* Uncompression algorithm method. */
	int version;                 /* Uncompression algorithm version. */
	ssize_t window_size;         /* Size of window_buf. */
	uint8_t* window_buf;         /* Circular buffer used during
	                                decompression. */
	uint8_t* filtered_buf;       /* Buffer used when applying filters. */
	const uint8_t* block_buf;    /* Buffer used when merging blocks. */
	size_t window_mask;          /* Convenience field; window_size - 1. */
	int64_t write_ptr;           /* This amount of data has been unpacked
	                                in the window buffer. */
	int64_t last_write_ptr;      /* This amount of data has been stored in
	                                the output file. */
	int64_t last_unstore_ptr;    /* Counter of bytes extracted during
	                                unstoring. This is separate from
	                                last_write_ptr because of how SERVICE
	                                base blocks are handled during skipping
	                                in solid multiarchive archives. */
	int64_t solid_offset;        /* Additional offset inside the window
	                                buffer, used in unpacking solid
	                                archives. */
	ssize_t cur_block_size;      /* Size of current data block. */
	int last_len;                /* Flag used in lzss decompression. */

	/* Decode tables used during lzss uncompression. */

#define HUFF_BC 20
	struct decode_table bd;      /* huffman bit lengths */
#define HUFF_NC 306
	struct decode_table ld;      /* literals */
#define HUFF_DC 64
	struct decode_table dd;      /* distances */
#define HUFF_LDC 16
	struct decode_table ldd;     /* lower bits of distances */
#define HUFF_RC 44
	struct decode_table rd;      /* repeating distances */
#define HUFF_TABLE_SIZE (HUFF_NC + HUFF_DC + HUFF_RC + HUFF_LDC)

	/* Circular deque for storing filters. */
	struct cdeque filters;
	int64_t last_block_start;    /* Used for sanity checking. */
	ssize_t last_block_length;   /* Used for sanity checking. */

	/* Distance cache used during lzss uncompression. */
	int dist_cache[4];

	/* Data buffer stack. */
	struct data_ready dready[2];
};

/* Bit reader state. */
struct bit_reader {
	int8_t bit_addr;    /* Current bit pointer inside current byte. */
	int in_addr;        /* Current byte pointer. */
};

/* RARv5 block header structure. Use bf_* functions to get values from
 * block_flags_u8 field. I.e. bf_byte_count, etc. */
struct compressed_block_header {
	/* block_flags_u8 contain fields encoded in little-endian bitfield:
	 *
	 * - table present flag (shr 7, and 1),
	 * - last block flag (shr 6, and 1),
	 * - byte_count (shr 3, and 7),
	 * - bit_size (shr 0, and 7). */
	uint8_t block_flags_u8;
	uint8_t block_cksum;
};

/* RARv5 main header structure. */
struct main_header {
	/* Does the archive contain solid streams? */
	uint8_t solid : 1;

	/* If this a multi-file archive? */
	uint8_t volume : 1;
	uint8_t endarc : 1;
	uint8_t notused : 5;

	unsigned int vol_no;
};

struct generic_header {
	uint8_t split_after : 1;
	uint8_t split_before : 1;
	uint8_t padding : 6;
	int size;
	int last_header_id;
};

struct multivolume {
	unsigned int expected_vol_no;
	uint8_t* push_buf;
};

/* Main context structure. */
struct rar5 {
	int header_initialized;

	/* Set to 1 if current file is positioned AFTER the magic value
	 * of the archive file. This is used in header reading functions. */
	int skipped_magic;

	/* Set to not zero if we're in skip mode (either by calling
	 * rar5_data_skip function or when skipping over solid streams).
	 * Set to 0 when in extraction mode. This is used during checksum
	 * calculation functions. */
	int skip_mode;

	/* Set to not zero if we're in block merging mode (i.e. when switching
	 * to another file in multivolume archive, last block from 1st archive
	 * needs to be merged with 1st block from 2nd archive). This flag
	 * guards against recursive use of the merging function, which doesn't
	 * support recursive calls. */
	int merge_mode;

	/* An offset to QuickOpen list. This is not supported by this unpacker,
	 * because we're focusing on streaming interface. QuickOpen is designed
	 * to make things quicker for non-stream interfaces, so it's not our
	 * use case. */
	uint64_t qlist_offset;

	/* An offset to additional Recovery data. This is not supported by this
	 * unpacker. Recovery data are additional Reed-Solomon codes that could
	 * be used to calculate bytes that are missing in archive or are
	 * corrupted. */
	uint64_t rr_offset;

	/* Various context variables grouped to different structures. */
	struct generic_header generic;
	struct main_header main;
	struct comp_state cstate;
	struct file_header file;
	struct bit_reader bits;
	struct multivolume vol;

	/* The header of currently processed RARv5 block. Used in main
	 * decompression logic loop. */
	struct compressed_block_header last_block_hdr;
};

/* Forward function declarations. */

static int verify_global_checksums(struct archive_read* a);
static int rar5_read_data_skip(struct archive_read *a);
static int push_data_ready(struct archive_read* a, struct rar5* rar,
    const uint8_t* buf, size_t size, int64_t offset);

/* CDE_xxx = Circular Double Ended (Queue) return values. */
enum CDE_RETURN_VALUES {
	CDE_OK, CDE_ALLOC, CDE_PARAM, CDE_OUT_OF_BOUNDS,
};

/* Clears the contents of this circular deque. */
static void cdeque_clear(struct cdeque* d) {
	d->size = 0;
	d->beg_pos = 0;
	d->end_pos = 0;
}

/* Creates a new circular deque object. Capacity must be power of 2: 8, 16,
 * 32, 64, 256, etc. When the user will add another item above current
 * capacity, the circular deque will overwrite the oldest entry. */
static int cdeque_init(struct cdeque* d, int max_capacity_power_of_2) {
	if(d == NULL || max_capacity_power_of_2 == 0)
		return CDE_PARAM;

	d->cap_mask = max_capacity_power_of_2 - 1;
	d->arr = NULL;

	if((max_capacity_power_of_2 & d->cap_mask) != 0)
		return CDE_PARAM;

	cdeque_clear(d);
	d->arr = malloc(sizeof(void*) * max_capacity_power_of_2);

	return d->arr ? CDE_OK : CDE_ALLOC;
}

/* Return the current size (not capacity) of circular deque `d`. */
static size_t cdeque_size(struct cdeque* d) {
	return d->size;
}

/* Returns the first element of current circular deque. Note that this
 * function doesn't perform any bounds checking. If you need bounds checking,
 * use `cdeque_front()` function instead.
 */
static void cdeque_front_fast(struct cdeque* d, void** value) {
	*value = (void*) d->arr[d->beg_pos];
}

/* Returns the first element of current circular deque. This function
 * performs bounds checking. */
static int cdeque_front(struct cdeque* d, void** value) {
	if(d->size > 0) {
		cdeque_front_fast(d, value);
		return CDE_OK;
	} else
		return CDE_OUT_OF_BOUNDS;
}

/* Pushes a new element into the end of this circular deque object. If current
 * size will exceed capacity, the oldest element will be overwritten. */
static int cdeque_push_back(struct cdeque* d, void* item) {
	if(d == NULL)
		return CDE_PARAM;

	if(d->size == d->cap_mask + 1)
		return CDE_OUT_OF_BOUNDS;

	d->arr[d->end_pos] = (size_t) item;
	d->end_pos = (d->end_pos + 1) & d->cap_mask;
	d->size++;

	return CDE_OK;
}

/* Pops a front element of this circular deque object and returns its value.
 * This function doesn't perform any bounds checking. */
static void cdeque_pop_front_fast(struct cdeque* d, void** value) {
	*value = (void*) d->arr[d->beg_pos];
	d->beg_pos = (d->beg_pos + 1) & d->cap_mask;
	d->size--;
}

/* Pops a front element of this circular deque object and returns its value.
 * This function performs bounds checking. */
static int cdeque_pop_front(struct cdeque* d, void** value) {
	if(!d || !value)
		return CDE_PARAM;

	if(d->size == 0)
		return CDE_OUT_OF_BOUNDS;

	cdeque_pop_front_fast(d, value);
	return CDE_OK;
}

/* Convenience function to cast filter_info** to void **. */
static void** cdeque_filter_p(struct filter_info** f) {
	return (void**) (size_t) f;
}

/* Convenience function to cast filter_info* to void *. */
static void* cdeque_filter(struct filter_info* f) {
	return (void**) (size_t) f;
}

/* Destroys this circular deque object. Deallocates the memory of the
 * collection buffer, but doesn't deallocate the memory of any pointer passed
 * to this deque as a value. */
static void cdeque_free(struct cdeque* d) {
	if(!d)
		return;

	if(!d->arr)
		return;

	free(d->arr);

	d->arr = NULL;
	d->beg_pos = -1;
	d->end_pos = -1;
	d->cap_mask = 0;
}

/* Accessors for the bitfields packed in block_flags_u8 (see
 * struct compressed_block_header). */
static inline uint8_t bf_bit_size(const struct compressed_block_header* hdr) {
	return hdr->block_flags_u8 & 7;
}

static inline uint8_t bf_byte_count(const struct compressed_block_header* hdr) {
	return (hdr->block_flags_u8 >> 3) & 7;
}

static inline uint8_t bf_is_table_present(const struct compressed_block_header* hdr) {
	return (hdr->block_flags_u8 >> 7) & 1;
}

static inline struct rar5* get_context(struct archive_read* a) {
	return (struct rar5*) a->format->data;
}

/* Convenience functions used by filter implementations. */

/* Copies [start, end) out of a circular window buffer, handling wrap-around
 * at the mask boundary. */
static void circular_memcpy(uint8_t* dst, uint8_t* window, const uint64_t mask,
    int64_t start, int64_t end)
{
	if((start & mask) > (end & mask)) {
		ssize_t len1 = mask + 1 - (start & mask);
		ssize_t len2 = end & mask;

		memcpy(dst, &window[start & mask], len1);
		memcpy(dst + len1, window, len2);
	} else {
		memcpy(dst, &window[start & mask], (size_t) (end - start));
	}
}

/* Reads a little-endian uint32 from the circular window buffer. */
static uint32_t read_filter_data(struct rar5* rar, uint32_t offset) {
	uint8_t linear_buf[4];
	circular_memcpy(linear_buf, rar->cstate.window_buf,
	    rar->cstate.window_mask, offset, offset + 4);
	return archive_le32dec(linear_buf);
}

/* Stores a little-endian uint32 into the filtered (linear) buffer. */
static void write_filter_data(struct rar5* rar, uint32_t offset,
    uint32_t value)
{
	archive_le32enc(&rar->cstate.filtered_buf[offset], value);
}

/* Allocates a new filter descriptor and adds it to the filter array. */
static struct filter_info* add_new_filter(struct rar5* rar) {
	struct filter_info* f =
	    (struct filter_info*) calloc(1, sizeof(struct filter_info));

	if(!f) {
		return NULL;
	}

	cdeque_push_back(&rar->cstate.filters, cdeque_filter(f));
	return f;
}

/* DELTA filter: de-interleaves per-channel byte deltas back into absolute
 * bytes. */
static int run_delta_filter(struct rar5* rar, struct filter_info* flt) {
	int i;
	ssize_t dest_pos, src_pos = 0;

	for(i = 0; i < flt->channels; i++) {
		uint8_t prev_byte = 0;
		for(dest_pos = i; dest_pos < flt->block_length;
		    dest_pos += flt->channels)
		{
			uint8_t byte;

			byte = rar->cstate.window_buf[
			    (rar->cstate.solid_offset + flt->block_start +
			    src_pos) & rar->cstate.window_mask];

			prev_byte -= byte;
			rar->cstate.filtered_buf[dest_pos] = prev_byte;
			src_pos++;
		}
	}

	return ARCHIVE_OK;
}

/* E8/E8E9 filter: converts x86 call/jmp relative addresses that were made
 * absolute by the compressor back into relative form. */
static int run_e8e9_filter(struct rar5* rar, struct filter_info* flt,
    int extended)
{
	const uint32_t file_size = 0x1000000;
	ssize_t i;

	circular_memcpy(rar->cstate.filtered_buf, rar->cstate.window_buf,
	    rar->cstate.window_mask,
	    rar->cstate.solid_offset + flt->block_start,
	    rar->cstate.solid_offset + flt->block_start + flt->block_length);

	for(i = 0; i < flt->block_length - 4;) {
		uint8_t b = rar->cstate.window_buf[
		    (rar->cstate.solid_offset + flt->block_start + i++) &
		    rar->cstate.window_mask];

		/*
		 * 0xE8 = x86's call <relative_addr_uint32> (function call)
		 * 0xE9 = x86's jmp <relative_addr_uint32> (unconditional jump)
		 */
		if(b == 0xE8 || (extended && b == 0xE9)) {
			uint32_t addr;
			uint32_t offset = (i + flt->block_start) % file_size;

			addr = read_filter_data(rar,
			    (uint32_t)(rar->cstate.solid_offset +
			    flt->block_start + i) & rar->cstate.window_mask);

			if(addr & 0x80000000) {
				if(((addr + offset) & 0x80000000) == 0) {
					write_filter_data(rar, (uint32_t)i,
					    addr + file_size);
				}
			} else {
				if((addr - file_size) & 0x80000000) {
					uint32_t naddr = addr - offset;
					write_filter_data(rar, (uint32_t)i,
					    naddr);
				}
			}

			i += 4;
		}
	}

	return ARCHIVE_OK;
}

/* ARM filter: rewrites absolute BL branch targets back to relative form. */
static int run_arm_filter(struct rar5* rar, struct filter_info* flt) {
	ssize_t i = 0;
	uint32_t offset;

	circular_memcpy(rar->cstate.filtered_buf, rar->cstate.window_buf,
	    rar->cstate.window_mask,
	    rar->cstate.solid_offset + flt->block_start,
	    rar->cstate.solid_offset + flt->block_start + flt->block_length);

	for(i = 0; i < flt->block_length - 3; i += 4) {
		uint8_t* b = &rar->cstate.window_buf[
		    (rar->cstate.solid_offset + flt->block_start + i + 3) &
		    rar->cstate.window_mask];

		if(*b == 0xEB) {
			/* 0xEB = ARM's BL (branch + link) instruction. */
			offset = read_filter_data(rar,
			    (rar->cstate.solid_offset + flt->block_start + i) &
			    rar->cstate.window_mask) & 0x00ffffff;

			offset -= (uint32_t) ((i + flt->block_start) / 4);
			offset = (offset & 0x00ffffff) | 0xeb000000;
			write_filter_data(rar, (uint32_t)i, offset);
		}
	}

	return ARCHIVE_OK;
}

/* Dispatches a single filter over its window range, then submits the
 * filtered output to the caller via push_data_ready. */
static int run_filter(struct archive_read* a, struct filter_info* flt) {
	int ret;
	struct rar5* rar = get_context(a);

	free(rar->cstate.filtered_buf);

	rar->cstate.filtered_buf = malloc(flt->block_length);
	if(!rar->cstate.filtered_buf) {
		archive_set_error(&a->archive, ENOMEM,
		    "Can't allocate memory for filter data.");
		return ARCHIVE_FATAL;
	}

	switch(flt->type) {
	case FILTER_DELTA:
		ret = run_delta_filter(rar, flt);
		break;

	case FILTER_E8:
		/* fallthrough */
	case FILTER_E8E9:
		ret = run_e8e9_filter(rar, flt, flt->type == FILTER_E8E9);
		break;

	case FILTER_ARM:
		ret = run_arm_filter(rar, flt);
		break;

	default:
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Unsupported filter type: 0x%x", flt->type);
		return ARCHIVE_FATAL;
	}

	if(ret != ARCHIVE_OK) {
		/* Filter has failed. */
		return ret;
	}

	if(ARCHIVE_OK != push_data_ready(a, rar, rar->cstate.filtered_buf,
	    flt->block_length, rar->cstate.last_write_ptr))
	{
		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "Stack overflow when submitting unpacked data");

		return ARCHIVE_FATAL;
	}

	rar->cstate.last_write_ptr += flt->block_length;
	return ARCHIVE_OK;
}

/* The `push_data` function submits the selected data range to the user.
* Next call of `use_data` will use the pointer, size and offset arguments * that are specified here. These arguments are pushed to the FIFO stack here, * and popped from the stack by the `use_data` function. */ static void push_data(struct archive_read* a, struct rar5* rar, const uint8_t* buf, int64_t idx_begin, int64_t idx_end) { const uint64_t wmask = rar->cstate.window_mask; const ssize_t solid_write_ptr = (rar->cstate.solid_offset + rar->cstate.last_write_ptr) & wmask; idx_begin += rar->cstate.solid_offset; idx_end += rar->cstate.solid_offset; /* Check if our unpacked data is wrapped inside the window circular * buffer. If it's not wrapped, it can be copied out by using * a single memcpy, but when it's wrapped, we need to copy the first * part with one memcpy, and the second part with another memcpy. */ if((idx_begin & wmask) > (idx_end & wmask)) { /* The data is wrapped (begin offset sis bigger than end * offset). */ const ssize_t frag1_size = rar->cstate.window_size - (idx_begin & wmask); const ssize_t frag2_size = idx_end & wmask; /* Copy the first part of the buffer first. */ push_data_ready(a, rar, buf + solid_write_ptr, frag1_size, rar->cstate.last_write_ptr); /* Copy the second part of the buffer. */ push_data_ready(a, rar, buf, frag2_size, rar->cstate.last_write_ptr + frag1_size); rar->cstate.last_write_ptr += frag1_size + frag2_size; } else { /* Data is not wrapped, so we can just use one call to copy the * data. */ push_data_ready(a, rar, buf + solid_write_ptr, (idx_end - idx_begin) & wmask, rar->cstate.last_write_ptr); rar->cstate.last_write_ptr += idx_end - idx_begin; } } /* Convenience function that submits the data to the user. It uses the * unpack window buffer as a source location. 
*/ static void push_window_data(struct archive_read* a, struct rar5* rar, int64_t idx_begin, int64_t idx_end) { push_data(a, rar, rar->cstate.window_buf, idx_begin, idx_end); } static int apply_filters(struct archive_read* a) { struct filter_info* flt; struct rar5* rar = get_context(a); int ret; rar->cstate.all_filters_applied = 0; /* Get the first filter that can be applied to our data. The data * needs to be fully unpacked before the filter can be run. */ if(CDE_OK == cdeque_front(&rar->cstate.filters, cdeque_filter_p(&flt))) { /* Check if our unpacked data fully covers this filter's * range. */ if(rar->cstate.write_ptr > flt->block_start && rar->cstate.write_ptr >= flt->block_start + flt->block_length) { /* Check if we have some data pending to be written * right before the filter's start offset. */ if(rar->cstate.last_write_ptr == flt->block_start) { /* Run the filter specified by descriptor * `flt`. */ ret = run_filter(a, flt); if(ret != ARCHIVE_OK) { /* Filter failure, return error. */ return ret; } /* Filter descriptor won't be needed anymore * after it's used, * so remove it from the * filter list and free its memory. */ (void) cdeque_pop_front(&rar->cstate.filters, cdeque_filter_p(&flt)); free(flt); } else { /* We can't run filters yet, dump the memory * right before the filter. */ push_window_data(a, rar, rar->cstate.last_write_ptr, flt->block_start); } /* Return 'filter applied or not needed' state to the * caller. */ return ARCHIVE_RETRY; } } rar->cstate.all_filters_applied = 1; return ARCHIVE_OK; } static void dist_cache_push(struct rar5* rar, int value) { int* q = rar->cstate.dist_cache; q[3] = q[2]; q[2] = q[1]; q[1] = q[0]; q[0] = value; } static int dist_cache_touch(struct rar5* rar, int idx) { int* q = rar->cstate.dist_cache; int i, dist = q[idx]; for(i = idx; i > 0; i--) q[i] = q[i - 1]; q[0] = dist; return dist; } static void free_filters(struct rar5* rar) { struct cdeque* d = &rar->cstate.filters; /* Free any remaining filters. 
All filters should be naturally * consumed by the unpacking function, so remaining filters after * unpacking normally mean that unpacking wasn't successful. * But still of course we shouldn't leak memory in such case. */ /* cdeque_size() is a fast operation, so we can use it as a loop * expression. */ while(cdeque_size(d) > 0) { struct filter_info* f = NULL; /* Pop_front will also decrease the collection's size. */ if (CDE_OK == cdeque_pop_front(d, cdeque_filter_p(&f))) free(f); } cdeque_clear(d); /* Also clear out the variables needed for sanity checking. */ rar->cstate.last_block_start = 0; rar->cstate.last_block_length = 0; } static void reset_file_context(struct rar5* rar) { memset(&rar->file, 0, sizeof(rar->file)); blake2sp_init(&rar->file.b2state, 32); if(rar->main.solid) { rar->cstate.solid_offset += rar->cstate.write_ptr; } else { rar->cstate.solid_offset = 0; } rar->cstate.write_ptr = 0; rar->cstate.last_write_ptr = 0; rar->cstate.last_unstore_ptr = 0; rar->file.redir_type = REDIR_TYPE_NONE; rar->file.redir_flags = 0; free_filters(rar); } static inline int get_archive_read(struct archive* a, struct archive_read** ar) { *ar = (struct archive_read*) a; archive_check_magic(a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_support_format_rar5"); return ARCHIVE_OK; } static int read_ahead(struct archive_read* a, size_t how_many, const uint8_t** ptr) { ssize_t avail = -1; if(!ptr) return 0; *ptr = __archive_read_ahead(a, how_many, &avail); if(*ptr == NULL) { return 0; } return 1; } static int consume(struct archive_read* a, int64_t how_many) { int ret; ret = how_many == __archive_read_consume(a, how_many) ? ARCHIVE_OK : ARCHIVE_FATAL; return ret; } /** * Read a RAR5 variable sized numeric value. This value will be stored in * `pvalue`. The `pvalue_len` argument points to a variable that will receive * the byte count that was consumed in order to decode the `pvalue` value, plus * one. * * pvalue_len is optional and can be NULL. 
 *
 * NOTE: if `pvalue_len` is NOT NULL, the caller needs to manually consume
 * the number of bytes that `pvalue_len` value contains. If the `pvalue_len`
 * is NULL, this consuming operation is done automatically.
 *
 * Returns 1 if *pvalue was successfully read.
 * Returns 0 if there was an error. In this case, *pvalue contains an
 * invalid value.
 */
static int read_var(struct archive_read* a, uint64_t* pvalue,
    uint64_t* pvalue_len)
{
	uint64_t result = 0;
	size_t shift, i;
	const uint8_t* p;
	uint8_t b;

	/* We will read maximum of 8 bytes. We don't have to handle the
	 * situation to read the RAR5 variable-sized value stored at the end of
	 * the file, because such situation will never happen. */
	if(!read_ahead(a, 8, &p))
		return 0;

	/* LEB128-style decode: 7 payload bits per byte, LSB first; the MSB
	 * of each byte is the continuation flag. */
	for(shift = 0, i = 0; i < 8; i++, shift += 7) {
		b = p[i];

		/* Strip the MSB from the input byte and add the resulting
		 * number to the `result`. */
		result += (b & (uint64_t)0x7F) << shift;

		/* MSB set to 1 means we need to continue decoding process.
		 * MSB set to 0 means we're done.
		 *
		 * This conditional checks for the second case. */
		if((b & 0x80) == 0) {
			if(pvalue) {
				*pvalue = result;
			}

			/* If the caller has passed the `pvalue_len` pointer,
			 * store the number of consumed bytes in it and do NOT
			 * consume those bytes, since the caller has all the
			 * information it needs to perform the consume. */
			if(pvalue_len) {
				*pvalue_len = 1 + i;
			} else {
				/* If the caller did not provide the
				 * `pvalue_len` pointer, it will not have the
				 * possibility to advance the file pointer,
				 * because it will not know how many bytes it
				 * needs to consume. This is why we handle
				 * such situation here automatically. */
				if(ARCHIVE_OK != consume(a, 1 + i)) {
					return 0;
				}
			}

			/* End of decoding process, return success. */
			return 1;
		}
	}

	/* The decoded value takes the maximum number of 8 bytes.
	 * It's a maximum number of bytes, so end decoding process here
	 * even if the first bit of last byte is 1.
	 */
	if(pvalue) {
		*pvalue = result;
	}

	/* NOTE(review): 9 bytes are reported/consumed here even though only
	 * 8 were decoded — this mirrors the historical behavior of this
	 * decoder; confirm against the RAR5 vint spec before changing. */
	if(pvalue_len) {
		*pvalue_len = 9;
	} else {
		if(ARCHIVE_OK != consume(a, 9)) {
			return 0;
		}
	}

	return 1;
}

/* Same as read_var(), but narrows the results into size_t out-params. */
static int read_var_sized(struct archive_read* a, size_t* pvalue,
    size_t* pvalue_len)
{
	uint64_t v;
	uint64_t v_size = 0;

	const int ret = pvalue_len ? read_var(a, &v, &v_size)
				   : read_var(a, &v, NULL);

	if(ret == 1 && pvalue) {
		*pvalue = (size_t) v;
	}

	if(pvalue_len) {
		/* Possible data truncation should be safe. */
		*pvalue_len = (size_t) v_size;
	}

	return ret;
}

/* Reads 32 bits from `p` at the current bit cursor (rar->bits), without
 * advancing it. NOTE(review): reads up to 5 bytes starting at in_addr —
 * the caller must guarantee that much data is available past the cursor. */
static int read_bits_32(struct rar5* rar, const uint8_t* p,
    uint32_t* value)
{
	uint32_t bits = ((uint32_t) p[rar->bits.in_addr]) << 24;
	bits |= p[rar->bits.in_addr + 1] << 16;
	bits |= p[rar->bits.in_addr + 2] << 8;
	bits |= p[rar->bits.in_addr + 3];
	bits <<= rar->bits.bit_addr;
	bits |= p[rar->bits.in_addr + 4] >> (8 - rar->bits.bit_addr);
	*value = bits;
	return ARCHIVE_OK;
}

/* Reads 16 bits at the current bit cursor, without advancing it.
 * NOTE(review): reads 3 bytes starting at in_addr; caller must ensure
 * availability. */
static int read_bits_16(struct rar5* rar, const uint8_t* p,
    uint16_t* value)
{
	int bits = (int) ((uint32_t) p[rar->bits.in_addr]) << 16;
	bits |= (int) p[rar->bits.in_addr + 1] << 8;
	bits |= (int) p[rar->bits.in_addr + 2];
	bits >>= (8 - rar->bits.bit_addr);
	*value = bits & 0xffff;
	return ARCHIVE_OK;
}

/* Advances the bit cursor by `bits`, carrying into the byte address. */
static void skip_bits(struct rar5* rar, int bits) {
	const int new_bits = rar->bits.bit_addr + bits;
	rar->bits.in_addr += new_bits >> 3;
	rar->bits.bit_addr = new_bits & 7;
}

/* Reads `n` bits (1..16) and advances the cursor past them. */
/* n = up to 16 */
static int read_consume_bits(struct rar5* rar, const uint8_t* p, int n,
    int* value)
{
	uint16_t v;
	int ret, num;

	if(n == 0 || n > 16) {
		/* This is a programmer error and should never happen
		 * in runtime. */
		return ARCHIVE_FATAL;
	}

	ret = read_bits_16(rar, p, &v);
	if(ret != ARCHIVE_OK)
		return ret;

	num = (int) v;
	num >>= 16 - n;

	skip_bits(rar, n);

	if(value)
		*value = num;

	return ARCHIVE_OK;
}

/* Reads a little-endian uint32 and consumes it. Returns 1/0. */
static int read_u32(struct archive_read* a, uint32_t* pvalue) {
	const uint8_t* p;
	if(!read_ahead(a, 4, &p))
		return 0;

	*pvalue = archive_le32dec(p);
	return ARCHIVE_OK == consume(a, 4) ?
	    1 : 0;
}

/* Reads a little-endian uint64 and consumes it. Returns 1/0. */
static int read_u64(struct archive_read* a, uint64_t* pvalue) {
	const uint8_t* p;
	if(!read_ahead(a, 8, &p))
		return 0;

	*pvalue = archive_le64dec(p);
	return ARCHIVE_OK == consume(a, 8) ? 1 : 0;
}

/* Bids 30 if the input starts with the RAR5 signature, -1 otherwise. */
static int bid_standard(struct archive_read* a) {
	const uint8_t* p;

	if(!read_ahead(a, rar5_signature_size, &p))
		return -1;

	if(!memcmp(rar5_signature, p, rar5_signature_size))
		return 30;

	return -1;
}

/* Format bidder entry point; defers to bid_standard() unless a better
 * bid already exists. */
static int rar5_bid(struct archive_read* a, int best_bid) {
	int my_bid;

	if(best_bid > 30)
		return -1;

	my_bid = bid_standard(a);
	if(my_bid > -1) {
		return my_bid;
	}

	return -1;
}

static int rar5_options(struct archive_read *a, const char *key,
    const char *val) {
	(void) a;
	(void) key;
	(void) val;

	/* No options supported in this version. Return the ARCHIVE_WARN code
	 * to signal the options supervisor that the unpacker didn't handle
	 * setting this option. */

	return ARCHIVE_WARN;
}

/* Marks the archive handle as a RAR5 archive. */
static void init_header(struct archive_read* a) {
	a->archive.archive_format = ARCHIVE_FORMAT_RAR_V5;
	a->archive.archive_format_name = "RAR5";
}

/* Derives the circular-buffer mask from the (power-of-two) window size. */
static void init_window_mask(struct rar5* rar) {
	if (rar->cstate.window_size)
		rar->cstate.window_mask = rar->cstate.window_size - 1;
	else
		rar->cstate.window_mask = 0;
}

enum HEADER_FLAGS {
	HFL_EXTRA_DATA = 0x0001,
	HFL_DATA = 0x0002,
	HFL_SKIP_IF_UNKNOWN = 0x0004,
	HFL_SPLIT_BEFORE = 0x0008,
	HFL_SPLIT_AFTER = 0x0010,
	HFL_CHILD = 0x0020,
	HFL_INHERITED = 0x0040
};

/* Parses the MAIN header's "locator" extra record; the quick-open list
 * and recovery-record offsets it carries are read but not used. */
static int process_main_locator_extra_block(struct archive_read* a,
    struct rar5* rar)
{
	uint64_t locator_flags;

	enum LOCATOR_FLAGS {
		QLIST = 0x01, RECOVERY = 0x02,
	};

	if(!read_var(a, &locator_flags, NULL)) {
		return ARCHIVE_EOF;
	}

	if(locator_flags & QLIST) {
		if(!read_var(a, &rar->qlist_offset, NULL)) {
			return ARCHIVE_EOF;
		}

		/* qlist is not used */
	}

	if(locator_flags & RECOVERY) {
		if(!read_var(a, &rar->rr_offset, NULL)) {
			return ARCHIVE_EOF;
		}

		/* rr is not used */
	}

	return ARCHIVE_OK;
}

/* Parses a FILE extra record holding the file's checksum; only BLAKE2sp
 * is supported. */
static int parse_file_extra_hash(struct archive_read* a, struct rar5* rar,
    ssize_t* extra_data_size)
{
	size_t hash_type =
	    0;
	size_t value_len;

	enum HASH_TYPE {
		BLAKE2sp = 0x00
	};

	if(!read_var_sized(a, &hash_type, &value_len))
		return ARCHIVE_EOF;

	*extra_data_size -= value_len;
	if(ARCHIVE_OK != consume(a, value_len)) {
		return ARCHIVE_EOF;
	}

	/* The file uses BLAKE2sp checksum algorithm instead of plain old
	 * CRC32. */
	if(hash_type == BLAKE2sp) {
		const uint8_t* p;
		const int hash_size = sizeof(rar->file.blake2sp);

		if(!read_ahead(a, hash_size, &p))
			return ARCHIVE_EOF;

		rar->file.has_blake2 = 1;
		memcpy(&rar->file.blake2sp, p, hash_size);

		if(ARCHIVE_OK != consume(a, hash_size)) {
			return ARCHIVE_EOF;
		}

		*extra_data_size -= hash_size;
	} else {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Unsupported hash type (0x%x)", (int) hash_type);
		return ARCHIVE_FATAL;
	}

	return ARCHIVE_OK;
}

/* Converts a Windows FILETIME (100ns ticks since 1601-01-01) to Unix
 * epoch seconds. */
static uint64_t time_win_to_unix(uint64_t win_time) {
	const size_t ns_in_sec = 10000000;
	const uint64_t sec_to_unix = 11644473600LL;
	return win_time / ns_in_sec - sec_to_unix;
}

/* Reads one timestamp: a 4-byte Unix time when `unix_time` is set,
 * otherwise an 8-byte Windows FILETIME converted to Unix seconds.
 * Decrements *extra_data_size by the bytes consumed. */
static int parse_htime_item(struct archive_read* a, char unix_time,
    uint64_t* where, ssize_t* extra_data_size)
{
	if(unix_time) {
		uint32_t time_val;
		if(!read_u32(a, &time_val))
			return ARCHIVE_EOF;

		*extra_data_size -= 4;
		*where = (uint64_t) time_val;
	} else {
		uint64_t windows_time;
		if(!read_u64(a, &windows_time))
			return ARCHIVE_EOF;

		*where = time_win_to_unix(windows_time);
		*extra_data_size -= 8;
	}

	return ARCHIVE_OK;
}

/* Parses a FILE extra record carrying a file "version"; the version is
 * appended to the entry's pathname as a ";N" suffix. */
static int parse_file_extra_version(struct archive_read* a,
    struct archive_entry* e, ssize_t* extra_data_size)
{
	size_t flags = 0;
	size_t version = 0;
	size_t value_len = 0;
	struct archive_string version_string;
	struct archive_string name_utf8_string;
	const char* cur_filename;

	/* Flags are ignored.
	 */
	if(!read_var_sized(a, &flags, &value_len))
		return ARCHIVE_EOF;

	*extra_data_size -= value_len;
	if(ARCHIVE_OK != consume(a, value_len))
		return ARCHIVE_EOF;

	if(!read_var_sized(a, &version, &value_len))
		return ARCHIVE_EOF;

	*extra_data_size -= value_len;
	if(ARCHIVE_OK != consume(a, value_len))
		return ARCHIVE_EOF;

	/* extra_data_size should be zero here. */

	cur_filename = archive_entry_pathname_utf8(e);
	if(cur_filename == NULL) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "Version entry without file name");
		return ARCHIVE_FATAL;
	}

	archive_string_init(&version_string);
	archive_string_init(&name_utf8_string);

	/* Prepare a ;123 suffix for the filename, where '123' is the version
	 * value of this file. */
	archive_string_sprintf(&version_string, ";%zu", version);

	/* Build the new filename. */
	archive_strcat(&name_utf8_string, cur_filename);
	archive_strcat(&name_utf8_string, version_string.s);

	/* Apply the new filename into this file's context. */
	archive_entry_update_pathname_utf8(e, name_utf8_string.s);

	/* Free buffers.
*/ archive_string_free(&version_string); archive_string_free(&name_utf8_string); return ARCHIVE_OK; } static int parse_file_extra_htime(struct archive_read* a, struct archive_entry* e, struct rar5* rar, ssize_t* extra_data_size) { char unix_time = 0; size_t flags = 0; size_t value_len; enum HTIME_FLAGS { IS_UNIX = 0x01, HAS_MTIME = 0x02, HAS_CTIME = 0x04, HAS_ATIME = 0x08, HAS_UNIX_NS = 0x10, }; if(!read_var_sized(a, &flags, &value_len)) return ARCHIVE_EOF; *extra_data_size -= value_len; if(ARCHIVE_OK != consume(a, value_len)) { return ARCHIVE_EOF; } unix_time = flags & IS_UNIX; if(flags & HAS_MTIME) { parse_htime_item(a, unix_time, &rar->file.e_mtime, extra_data_size); archive_entry_set_mtime(e, rar->file.e_mtime, 0); } if(flags & HAS_CTIME) { parse_htime_item(a, unix_time, &rar->file.e_ctime, extra_data_size); archive_entry_set_ctime(e, rar->file.e_ctime, 0); } if(flags & HAS_ATIME) { parse_htime_item(a, unix_time, &rar->file.e_atime, extra_data_size); archive_entry_set_atime(e, rar->file.e_atime, 0); } if(flags & HAS_UNIX_NS) { if(!read_u32(a, &rar->file.e_unix_ns)) return ARCHIVE_EOF; *extra_data_size -= 4; } return ARCHIVE_OK; } static int parse_file_extra_redir(struct archive_read* a, struct archive_entry* e, struct rar5* rar, ssize_t* extra_data_size) { uint64_t value_size = 0; size_t target_size = 0; char target_utf8_buf[MAX_NAME_IN_BYTES]; const uint8_t* p; if(!read_var(a, &rar->file.redir_type, &value_size)) return ARCHIVE_EOF; if(ARCHIVE_OK != consume(a, (int64_t)value_size)) return ARCHIVE_EOF; *extra_data_size -= value_size; if(!read_var(a, &rar->file.redir_flags, &value_size)) return ARCHIVE_EOF; if(ARCHIVE_OK != consume(a, (int64_t)value_size)) return ARCHIVE_EOF; *extra_data_size -= value_size; if(!read_var_sized(a, &target_size, NULL)) return ARCHIVE_EOF; *extra_data_size -= target_size + 1; if(!read_ahead(a, target_size, &p)) return ARCHIVE_EOF; if(target_size > (MAX_NAME_IN_CHARS - 1)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, 
		    "Link target is too long");
		return ARCHIVE_FATAL;
	}

	if(target_size == 0) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "No link target specified");
		return ARCHIVE_FATAL;
	}

	/* Safe: target_size <= MAX_NAME_IN_CHARS - 1 was verified above. */
	memcpy(target_utf8_buf, p, target_size);
	target_utf8_buf[target_size] = 0;

	if(ARCHIVE_OK != consume(a, (int64_t)target_size))
		return ARCHIVE_EOF;

	switch(rar->file.redir_type) {
		case REDIR_TYPE_UNIXSYMLINK:
		case REDIR_TYPE_WINSYMLINK:
			archive_entry_set_filetype(e, AE_IFLNK);
			archive_entry_update_symlink_utf8(e,
			    target_utf8_buf);
			if (rar->file.redir_flags & REDIR_SYMLINK_IS_DIR) {
				archive_entry_set_symlink_type(e,
					AE_SYMLINK_TYPE_DIRECTORY);
			} else {
				archive_entry_set_symlink_type(e,
					AE_SYMLINK_TYPE_FILE);
			}
			break;

		case REDIR_TYPE_HARDLINK:
			archive_entry_set_filetype(e, AE_IFREG);
			archive_entry_update_hardlink_utf8(e,
			    target_utf8_buf);
			break;

		default:
			/* Unknown redir type, skip it. */
			break;
	}
	return ARCHIVE_OK;
}

/* Parses a FILE extra record with ownership data: optional user/group
 * names (truncated to OWNER_MAXNAMELEN - 1) and optional numeric
 * uid/gid, all applied to the archive_entry. */
static int parse_file_extra_owner(struct archive_read* a,
    struct archive_entry* e, ssize_t* extra_data_size)
{
	uint64_t flags = 0;
	uint64_t value_size = 0;
	uint64_t id = 0;
	size_t name_len = 0;
	size_t name_size = 0;
	char namebuf[OWNER_MAXNAMELEN];
	const uint8_t* p;

	if(!read_var(a, &flags, &value_size))
		return ARCHIVE_EOF;
	if(ARCHIVE_OK != consume(a, (int64_t)value_size))
		return ARCHIVE_EOF;
	*extra_data_size -= value_size;

	if ((flags & OWNER_USER_NAME) != 0) {
		if(!read_var_sized(a, &name_size, NULL))
			return ARCHIVE_EOF;
		*extra_data_size -= name_size + 1;

		if(!read_ahead(a, name_size, &p))
			return ARCHIVE_EOF;

		/* Clamp over-long names to the buffer size. */
		if (name_size >= OWNER_MAXNAMELEN) {
			name_len = OWNER_MAXNAMELEN - 1;
		} else {
			name_len = name_size;
		}

		memcpy(namebuf, p, name_len);
		namebuf[name_len] = 0;
		if(ARCHIVE_OK != consume(a, (int64_t)name_size))
			return ARCHIVE_EOF;

		archive_entry_set_uname(e, namebuf);
	}
	if ((flags & OWNER_GROUP_NAME) != 0) {
		if(!read_var_sized(a, &name_size, NULL))
			return ARCHIVE_EOF;
		*extra_data_size -= name_size + 1;

		if(!read_ahead(a, name_size, &p))
			return ARCHIVE_EOF;

		if (name_size >=
		    OWNER_MAXNAMELEN) {
			/* Clamp over-long names to the buffer size. */
			name_len = OWNER_MAXNAMELEN - 1;
		} else {
			name_len = name_size;
		}

		memcpy(namebuf, p, name_len);
		namebuf[name_len] = 0;
		if(ARCHIVE_OK != consume(a, (int64_t)name_size))
			return ARCHIVE_EOF;

		archive_entry_set_gname(e, namebuf);
	}
	if ((flags & OWNER_USER_UID) != 0) {
		if(!read_var(a, &id, &value_size))
			return ARCHIVE_EOF;
		if(ARCHIVE_OK != consume(a, (int64_t)value_size))
			return ARCHIVE_EOF;
		*extra_data_size -= value_size;

		archive_entry_set_uid(e, (la_int64_t)id);
	}
	if ((flags & OWNER_GROUP_GID) != 0) {
		if(!read_var(a, &id, &value_size))
			return ARCHIVE_EOF;
		if(ARCHIVE_OK != consume(a, (int64_t)value_size))
			return ARCHIVE_EOF;
		*extra_data_size -= value_size;

		archive_entry_set_gid(e, (la_int64_t)id);
	}
	return ARCHIVE_OK;
}

/* Dispatches over all extra records attached to a FILE/SERVICE header,
 * handing each to its dedicated parser. Unsupported record types cause
 * the remaining extra area to be skipped wholesale. */
static int process_head_file_extra(struct archive_read* a,
    struct archive_entry* e, struct rar5* rar,
    ssize_t extra_data_size)
{
	size_t extra_field_size;
	size_t extra_field_id = 0;
	int ret = ARCHIVE_FATAL;
	size_t var_size;

	while(extra_data_size > 0) {
		if(!read_var_sized(a, &extra_field_size, &var_size))
			return ARCHIVE_EOF;

		extra_data_size -= var_size;
		if(ARCHIVE_OK != consume(a, var_size)) {
			return ARCHIVE_EOF;
		}

		if(!read_var_sized(a, &extra_field_id, &var_size))
			return ARCHIVE_EOF;

		extra_data_size -= var_size;
		if(ARCHIVE_OK != consume(a, var_size)) {
			return ARCHIVE_EOF;
		}

		switch(extra_field_id) {
			case EX_HASH:
				ret = parse_file_extra_hash(a, rar,
				    &extra_data_size);
				break;
			case EX_HTIME:
				ret = parse_file_extra_htime(a, e, rar,
				    &extra_data_size);
				break;
			case EX_REDIR:
				ret = parse_file_extra_redir(a, e, rar,
				    &extra_data_size);
				break;
			case EX_UOWNER:
				ret = parse_file_extra_owner(a, e,
				    &extra_data_size);
				break;
			case EX_VERSION:
				ret = parse_file_extra_version(a, e,
				    &extra_data_size);
				break;
			case EX_CRYPT:
				/* fallthrough */
			case EX_SUBDATA:
				/* fallthrough */
			default:
				/* Skip unsupported entry. */
				return consume(a, extra_data_size);
		}
	}

	if(ret != ARCHIVE_OK) {
		/* Attribute not implemented.
		 */
		return ret;
	}

	return ARCHIVE_OK;
}

/* Parses one FILE (or SERVICE) base-block header: flags, sizes, times,
 * CRC, compression parameters, host-OS attributes, filename, and any
 * attached extra records. Fills both `rar` context state and `entry`.
 * Returns ARCHIVE_RETRY when standing on a continuation ('split before')
 * header, ARCHIVE_OK on a normal header, or an error code. */
static int process_head_file(struct archive_read* a, struct rar5* rar,
    struct archive_entry* entry, size_t block_flags)
{
	ssize_t extra_data_size = 0;
	size_t data_size = 0;
	size_t file_flags = 0;
	size_t file_attr = 0;
	size_t compression_info = 0;
	size_t host_os = 0;
	size_t name_size = 0;
	uint64_t unpacked_size, window_size;
	uint32_t mtime = 0, crc = 0;
	int c_method = 0, c_version = 0;
	char name_utf8_buf[MAX_NAME_IN_BYTES];
	const uint8_t* p;

	enum FILE_FLAGS {
		DIRECTORY = 0x0001, UTIME = 0x0002, CRC32 = 0x0004,
		UNKNOWN_UNPACKED_SIZE = 0x0008,
	};

	enum FILE_ATTRS {
		ATTR_READONLY = 0x1, ATTR_HIDDEN = 0x2, ATTR_SYSTEM = 0x4,
		ATTR_DIRECTORY = 0x10,
	};

	enum COMP_INFO_FLAGS {
		SOLID = 0x0040,
	};

	enum HOST_OS {
		HOST_WINDOWS = 0, HOST_UNIX = 1,
	};

	archive_entry_clear(entry);

	/* Do not reset file context if we're switching archives. */
	if(!rar->cstate.switch_multivolume) {
		reset_file_context(rar);
	}

	if(block_flags & HFL_EXTRA_DATA) {
		size_t edata_size = 0;
		if(!read_var_sized(a, &edata_size, NULL))
			return ARCHIVE_EOF;

		/* Intentional type cast from unsigned to signed.
		 */
		extra_data_size = (ssize_t) edata_size;
	}

	if(block_flags & HFL_DATA) {
		if(!read_var_sized(a, &data_size, NULL))
			return ARCHIVE_EOF;

		rar->file.bytes_remaining = data_size;
	} else {
		rar->file.bytes_remaining = 0;

		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
				"no data found in file/service block");
		return ARCHIVE_FATAL;
	}

	if(!read_var_sized(a, &file_flags, NULL))
		return ARCHIVE_EOF;

	if(!read_var(a, &unpacked_size, NULL))
		return ARCHIVE_EOF;

	if(file_flags & UNKNOWN_UNPACKED_SIZE) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "Files with unknown unpacked size are not supported");
		return ARCHIVE_FATAL;
	}

	rar->file.dir = (uint8_t) ((file_flags & DIRECTORY) > 0);

	if(!read_var_sized(a, &file_attr, NULL))
		return ARCHIVE_EOF;

	if(file_flags & UTIME) {
		if(!read_u32(a, &mtime))
			return ARCHIVE_EOF;
	}

	if(file_flags & CRC32) {
		if(!read_u32(a, &crc))
			return ARCHIVE_EOF;
	}

	if(!read_var_sized(a, &compression_info, NULL))
		return ARCHIVE_EOF;

	c_method = (int) (compression_info >> 7) & 0x7;
	c_version = (int) (compression_info & 0x3f);

	/* RAR5 seems to limit the dictionary size to 64MB. */
	window_size = (rar->file.dir > 0) ?
		0 :
		g_unpack_window_size << ((compression_info >> 10) & 15);

	rar->cstate.method = c_method;
	rar->cstate.version = c_version + 50;
	rar->file.solid = (compression_info & SOLID) > 0;

	/* Archives which declare solid files without initializing the window
	 * buffer first are invalid. */

	if(rar->file.solid > 0 && rar->cstate.window_buf == NULL) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
				  "Declared solid file, but no window buffer "
				  "initialized yet.");
		return ARCHIVE_FATAL;
	}

	/* Check if window_size is a sane value. Also, if the file is not
	 * declared as a directory, disallow window_size == 0.
	 */
	if(window_size > (64 * 1024 * 1024) ||
	    (rar->file.dir == 0 && window_size == 0))
	{
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Declared dictionary size is not supported.");
		return ARCHIVE_FATAL;
	}

	if(rar->file.solid > 0) {
		/* Re-check if current window size is the same as previous
		 * window size (for solid files only). */
		if(rar->file.solid_window_size > 0 &&
		    rar->file.solid_window_size != (ssize_t) window_size)
		{
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Window size for this solid file doesn't match "
			    "the window size used in previous solid file. ");
			return ARCHIVE_FATAL;
		}
	}

	/* If we're currently switching volumes, ignore the new definition of
	 * window_size. */
	if(rar->cstate.switch_multivolume == 0) {
		/* Values up to 64M should fit into ssize_t on every
		 * architecture. */
		rar->cstate.window_size = (ssize_t) window_size;
	}

	if(rar->file.solid > 0 && rar->file.solid_window_size == 0) {
		/* Solid files have to have the same window_size across
		   whole archive. Remember the window_size parameter
		   for first solid file found.
		 */
		rar->file.solid_window_size = rar->cstate.window_size;
	}

	init_window_mask(rar);

	rar->file.service = 0;

	if(!read_var_sized(a, &host_os, NULL))
		return ARCHIVE_EOF;

	if(host_os == HOST_WINDOWS) {
		/* Host OS is Windows */

		__LA_MODE_T mode;

		if(file_attr & ATTR_DIRECTORY) {
			if (file_attr & ATTR_READONLY) {
				mode = 0555 | AE_IFDIR;
			} else {
				mode = 0755 | AE_IFDIR;
			}
		} else {
			if (file_attr & ATTR_READONLY) {
				mode = 0444 | AE_IFREG;
			} else {
				mode = 0644 | AE_IFREG;
			}
		}

		archive_entry_set_mode(entry, mode);

		if (file_attr & (ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM)) {
			char *fflags_text, *ptr;
			/* allocate for "rdonly,hidden,system," */
			fflags_text = malloc(22 * sizeof(char));
			if (fflags_text != NULL) {
				ptr = fflags_text;
				if (file_attr & ATTR_READONLY) {
					strcpy(ptr, "rdonly,");
					ptr = ptr + 7;
				}
				if (file_attr & ATTR_HIDDEN) {
					strcpy(ptr, "hidden,");
					ptr = ptr + 7;
				}
				if (file_attr & ATTR_SYSTEM) {
					strcpy(ptr, "system,");
					ptr = ptr + 7;
				}
				if (ptr > fflags_text) {
					/* Delete trailing comma */
					*(ptr - 1) = '\0';
					archive_entry_copy_fflags_text(entry,
					    fflags_text);
				}
				free(fflags_text);
			}
		}
	} else if(host_os == HOST_UNIX) {
		/* Host OS is Unix */
		archive_entry_set_mode(entry, (__LA_MODE_T) file_attr);
	} else {
		/* Unknown host OS */
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
				"Unsupported Host OS: 0x%x", (int) host_os);

		return ARCHIVE_FATAL;
	}

	if(!read_var_sized(a, &name_size, NULL))
		return ARCHIVE_EOF;

	if(!read_ahead(a, name_size, &p))
		return ARCHIVE_EOF;

	if(name_size > (MAX_NAME_IN_CHARS - 1)) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
				"Filename is too long");

		return ARCHIVE_FATAL;
	}

	if(name_size == 0) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
				"No filename specified");

		return ARCHIVE_FATAL;
	}

	memcpy(name_utf8_buf, p, name_size);
	name_utf8_buf[name_size] = 0;
	if(ARCHIVE_OK != consume(a, name_size)) {
		return ARCHIVE_EOF;
	}

	archive_entry_update_pathname_utf8(entry, name_utf8_buf);

	if(extra_data_size > 0) {
		int ret = process_head_file_extra(a, entry, rar,
		    extra_data_size);

		/*
		 * TODO: rewrite or remove useless sanity check
		 *       as extra_data_size is not passed as a pointer
		 *
		if(extra_data_size < 0) {
			archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
			    "File extra data size is not zero");
			return ARCHIVE_FATAL;
		}
		 */

		if(ret != ARCHIVE_OK)
			return ret;
	}

	if((file_flags & UNKNOWN_UNPACKED_SIZE) == 0) {
		rar->file.unpacked_size = (ssize_t) unpacked_size;
		if(rar->file.redir_type == REDIR_TYPE_NONE)
			archive_entry_set_size(entry, unpacked_size);
	}

	if(file_flags & UTIME) {
		archive_entry_set_mtime(entry, (time_t) mtime, 0);
	}

	if(file_flags & CRC32) {
		rar->file.stored_crc32 = crc;
	}

	if(!rar->cstate.switch_multivolume) {
		/* Do not reinitialize unpacking state if we're switching
		 * archives. */
		rar->cstate.block_parsing_finished = 1;
		rar->cstate.all_filters_applied = 1;
		rar->cstate.initialized = 0;
	}

	if(rar->generic.split_before > 0) {
		/* If now we're standing on a header that has a 'split before'
		 * mark, it means we're standing on a 'continuation' file
		 * header. Signal the caller that if it wants to move to
		 * another file, it must call rar5_read_header() function
		 * again. */

		return ARCHIVE_RETRY;
	} else {
		return ARCHIVE_OK;
	}
}

/* Parses a SERVICE base block by reusing the FILE-block parser, then
 * immediately skips its data area (service data is not exposed to the
 * user). */
static int process_head_service(struct archive_read* a, struct rar5* rar,
    struct archive_entry* entry, size_t block_flags)
{
	/* Process this SERVICE block the same way as FILE blocks. */
	int ret = process_head_file(a, rar, entry, block_flags);
	if(ret != ARCHIVE_OK)
		return ret;

	rar->file.service = 1;

	/* But skip the data part automatically. It's no use for the user
	 * anyway.  It contains only service data, not even needed to
	 * properly unpack the file. */
	ret = rar5_read_data_skip(a);
	if(ret != ARCHIVE_OK)
		return ret;

	/* After skipping, try parsing another block automatically.
	 */
	return ARCHIVE_RETRY;
}

/* Parses the MAIN base block: archive-wide flags (multi-volume, solid),
 * the volume number, and the optional locator extra record. */
static int process_head_main(struct archive_read* a, struct rar5* rar,
    struct archive_entry* entry, size_t block_flags)
{
	int ret;
	size_t extra_data_size = 0;
	size_t extra_field_size = 0;
	size_t extra_field_id = 0;
	size_t archive_flags = 0;

	enum MAIN_FLAGS {
		VOLUME = 0x0001,         /* multi-volume archive */
		VOLUME_NUMBER = 0x0002,  /* volume number, first vol doesn't
					  * have it */
		SOLID = 0x0004,          /* solid archive */
		PROTECT = 0x0008,        /* contains Recovery info */
		LOCK = 0x0010,           /* readonly flag, not used */
	};

	enum MAIN_EXTRA {
		// Just one attribute here.
		LOCATOR = 0x01,
	};

	(void) entry;

	if(block_flags & HFL_EXTRA_DATA) {
		if(!read_var_sized(a, &extra_data_size, NULL))
			return ARCHIVE_EOF;
	} else {
		extra_data_size = 0;
	}

	if(!read_var_sized(a, &archive_flags, NULL)) {
		return ARCHIVE_EOF;
	}

	rar->main.volume = (archive_flags & VOLUME) > 0;
	rar->main.solid = (archive_flags & SOLID) > 0;

	if(archive_flags & VOLUME_NUMBER) {
		size_t v = 0;
		if(!read_var_sized(a, &v, NULL)) {
			return ARCHIVE_EOF;
		}

		if (v > UINT_MAX) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Invalid volume number");
			return ARCHIVE_FATAL;
		}

		rar->main.vol_no = (unsigned int) v;
	} else {
		rar->main.vol_no = 0;
	}

	if(rar->vol.expected_vol_no > 0 &&
	    rar->main.vol_no != rar->vol.expected_vol_no)
	{
		/* Returning EOF instead of FATAL because of strange
		 * libarchive behavior. When opening multiple files via
		 * archive_read_open_filenames(), after reading up the whole
		 * last file, the __archive_read_ahead function wraps up to
		 * the first archive instead of returning EOF. */
		return ARCHIVE_EOF;
	}

	if(extra_data_size == 0) {
		/* Early return.
		 */
		return ARCHIVE_OK;
	}

	if(!read_var_sized(a, &extra_field_size, NULL)) {
		return ARCHIVE_EOF;
	}

	if(!read_var_sized(a, &extra_field_id, NULL)) {
		return ARCHIVE_EOF;
	}

	if(extra_field_size == 0) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Invalid extra field size");
		return ARCHIVE_FATAL;
	}

	switch(extra_field_id) {
		case LOCATOR:
			ret = process_main_locator_extra_block(a, rar);
			if(ret != ARCHIVE_OK) {
				/* Error while parsing main locator extra
				 * block. */
				return ret;
			}

			break;
		default:
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Unsupported extra type (0x%x)",
			    (int) extra_field_id);
			return ARCHIVE_FATAL;
	}

	return ARCHIVE_OK;
}

/* Discards any unread packed data of the current file so the reader can
 * advance to the next base block. Uses raw consume() in merge mode (where
 * rar5_read_data_skip would recurse into merge_block()), and the safe
 * solid-aware skip otherwise. */
static int skip_unprocessed_bytes(struct archive_read* a) {
	struct rar5* rar = get_context(a);
	int ret;

	if(rar->file.bytes_remaining) {
		/* Use different skipping method in block merging mode than in
		 * normal mode. If merge mode is active, rar5_read_data_skip
		 * can't be used, because it could allow recursive use of
		 * merge_block() function, and this function doesn't support
		 * recursive use. */
		if(rar->merge_mode) {
			/* Discard whole merged block. This is valid in solid
			 * mode as well, because the code will discard blocks
			 * only if those blocks are safe to discard (i.e.
			 * they're not FILE blocks).  */
			ret = consume(a, rar->file.bytes_remaining);
			if(ret != ARCHIVE_OK) {
				return ret;
			}
			rar->file.bytes_remaining = 0;
		} else {
			/* If we're not in merge mode, use safe skipping code.
			 * This will ensure we'll handle solid archives
			 * properly. */
			ret = rar5_read_data_skip(a);
			if(ret != ARCHIVE_OK) {
				return ret;
			}
		}
	}

	return ARCHIVE_OK;
}

static int scan_for_signature(struct archive_read* a);

/* Base block processing function. A 'base block' is a RARv5 header block
 * that tells the reader what kind of data is stored inside the block.
 *
 * From the birds-eye view a RAR file looks file this:
 *
 * <magic><base_block_1><base_block_2>...<base_block_n>
 *
 * There are a few types of base blocks.
Those types are specified inside * the 'switch' statement in this function. For example purposes, I'll write * how a standard RARv5 file could look like here: * * <magic><MAIN><FILE><FILE><FILE><SERVICE><ENDARC> * * The structure above could describe an archive file with 3 files in it, * one service "QuickOpen" block (that is ignored by this parser), and an * end of file base block marker. * * If the file is stored in multiple archive files ("multiarchive"), it might * look like this: * * .part01.rar: <magic><MAIN><FILE><ENDARC> * .part02.rar: <magic><MAIN><FILE><ENDARC> * .part03.rar: <magic><MAIN><FILE><ENDARC> * * This example could describe 3 RAR files that contain ONE archived file. * Or it could describe 3 RAR files that contain 3 different files. Or 3 * RAR files than contain 2 files. It all depends what metadata is stored in * the headers of <FILE> blocks. * * Each <FILE> block contains info about its size, the name of the file it's * storing inside, and whether this FILE block is a continuation block of * previous archive ('split before'), and is this FILE block should be * continued in another archive ('split after'). By parsing the 'split before' * and 'split after' flags, we're able to tell if multiple <FILE> base blocks * are describing one file, or multiple files (with the same filename, for * example). * * One thing to note is that if we're parsing the first <FILE> block, and * we see 'split after' flag, then we need to jump over to another <FILE> * block to be able to decompress rest of the data. To do this, we need * to skip the <ENDARC> block, then switch to another file, then skip the * <magic> block, <MAIN> block, and then we're standing on the proper * <FILE> block. 
*/ static int process_base_block(struct archive_read* a, struct archive_entry* entry) { const size_t SMALLEST_RAR5_BLOCK_SIZE = 3; struct rar5* rar = get_context(a); uint32_t hdr_crc, computed_crc; size_t raw_hdr_size = 0, hdr_size_len, hdr_size; size_t header_id = 0; size_t header_flags = 0; const uint8_t* p; int ret; enum HEADER_TYPE { HEAD_MARK = 0x00, HEAD_MAIN = 0x01, HEAD_FILE = 0x02, HEAD_SERVICE = 0x03, HEAD_CRYPT = 0x04, HEAD_ENDARC = 0x05, HEAD_UNKNOWN = 0xff, }; /* Skip any unprocessed data for this file. */ ret = skip_unprocessed_bytes(a); if(ret != ARCHIVE_OK) return ret; /* Read the expected CRC32 checksum. */ if(!read_u32(a, &hdr_crc)) { return ARCHIVE_EOF; } /* Read header size. */ if(!read_var_sized(a, &raw_hdr_size, &hdr_size_len)) { return ARCHIVE_EOF; } hdr_size = raw_hdr_size + hdr_size_len; /* Sanity check, maximum header size for RAR5 is 2MB. */ if(hdr_size > (2 * 1024 * 1024)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Base block header is too large"); return ARCHIVE_FATAL; } /* Additional sanity checks to weed out invalid files. */ if(raw_hdr_size == 0 || hdr_size_len == 0 || hdr_size < SMALLEST_RAR5_BLOCK_SIZE) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Too small block encountered (%ld bytes)", raw_hdr_size); return ARCHIVE_FATAL; } /* Read the whole header data into memory, maximum memory use here is * 2MB. */ if(!read_ahead(a, hdr_size, &p)) { return ARCHIVE_EOF; } /* Verify the CRC32 of the header data. */ computed_crc = (uint32_t) crc32(0, p, (int) hdr_size); if(computed_crc != hdr_crc) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Header CRC error"); return ARCHIVE_FATAL; } /* If the checksum is OK, we proceed with parsing. 
*/ if(ARCHIVE_OK != consume(a, hdr_size_len)) { return ARCHIVE_EOF; } if(!read_var_sized(a, &header_id, NULL)) return ARCHIVE_EOF; if(!read_var_sized(a, &header_flags, NULL)) return ARCHIVE_EOF; rar->generic.split_after = (header_flags & HFL_SPLIT_AFTER) > 0; rar->generic.split_before = (header_flags & HFL_SPLIT_BEFORE) > 0; rar->generic.size = (int)hdr_size; rar->generic.last_header_id = (int)header_id; rar->main.endarc = 0; /* Those are possible header ids in RARv5. */ switch(header_id) { case HEAD_MAIN: ret = process_head_main(a, rar, entry, header_flags); /* Main header doesn't have any files in it, so it's * pointless to return to the caller. Retry to next * header, which should be HEAD_FILE/HEAD_SERVICE. */ if(ret == ARCHIVE_OK) return ARCHIVE_RETRY; return ret; case HEAD_SERVICE: ret = process_head_service(a, rar, entry, header_flags); return ret; case HEAD_FILE: ret = process_head_file(a, rar, entry, header_flags); return ret; case HEAD_CRYPT: archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Encryption is not supported"); return ARCHIVE_FATAL; case HEAD_ENDARC: rar->main.endarc = 1; /* After encountering an end of file marker, we need * to take into consideration if this archive is * continued in another file (i.e. is it part01.rar: * is there a part02.rar?) */ if(rar->main.volume) { /* In case there is part02.rar, position the * read pointer in a proper place, so we can * resume parsing. 
*/ ret = scan_for_signature(a); if(ret == ARCHIVE_FATAL) { return ARCHIVE_EOF; } else { if(rar->vol.expected_vol_no == UINT_MAX) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Header error"); return ARCHIVE_FATAL; } rar->vol.expected_vol_no = rar->main.vol_no + 1; return ARCHIVE_OK; } } else { return ARCHIVE_EOF; } case HEAD_MARK: return ARCHIVE_EOF; default: if((header_flags & HFL_SKIP_IF_UNKNOWN) == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Header type error"); return ARCHIVE_FATAL; } else { /* If the block is marked as 'skip if unknown', * do as the flag says: skip the block * instead on failing on it. */ return ARCHIVE_RETRY; } } #if !defined WIN32 // Not reached. archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER, "Internal unpacker error"); return ARCHIVE_FATAL; #endif } static int skip_base_block(struct archive_read* a) { int ret; struct rar5* rar = get_context(a); /* Create a new local archive_entry structure that will be operated on * by header reader; operations on this archive_entry will be discarded. */ struct archive_entry* entry = archive_entry_new(); ret = process_base_block(a, entry); /* Discard operations on this archive_entry structure. 
	 */
	archive_entry_free(entry);

	if(ret == ARCHIVE_FATAL)
		return ret;

	if(rar->generic.last_header_id == 2 &&
	    rar->generic.split_before > 0)
		return ARCHIVE_OK;

	if(ret == ARCHIVE_OK)
		return ARCHIVE_RETRY;
	else
		return ret;
}

/* Format entry point: reads base blocks until one describing an actual
 * entry is produced (MAIN headers and post-ENDARC blocks are looped
 * over). Consumes the RAR5 magic on the first call. */
static int rar5_read_header(struct archive_read *a,
    struct archive_entry *entry)
{
	struct rar5* rar = get_context(a);
	int ret;

	if(rar->header_initialized == 0) {
		init_header(a);
		rar->header_initialized = 1;
	}

	if(rar->skipped_magic == 0) {
		if(ARCHIVE_OK != consume(a, rar5_signature_size)) {
			return ARCHIVE_EOF;
		}

		rar->skipped_magic = 1;
	}

	do {
		ret = process_base_block(a, entry);
	} while(ret == ARCHIVE_RETRY ||
			(rar->main.endarc > 0 && ret == ARCHIVE_OK));

	return ret;
}

/* (Re)allocates the sliding window and filter buffers for the current
 * window_size and resets all Huffman decode tables and write pointers.
 * NOTE(review): the calloc() results are not checked here — a failed
 * allocation leaves window_buf NULL; callers appear to rely on later
 * NULL checks (e.g. the solid-file check in process_head_file). Confirm
 * before relying on this path. */
static void init_unpack(struct rar5* rar) {
	rar->file.calculated_crc32 = 0;
	init_window_mask(rar);

	free(rar->cstate.window_buf);
	free(rar->cstate.filtered_buf);

	if(rar->cstate.window_size > 0) {
		rar->cstate.window_buf = calloc(1, rar->cstate.window_size);
		rar->cstate.filtered_buf = calloc(1,
		    rar->cstate.window_size);
	} else {
		rar->cstate.window_buf = NULL;
		rar->cstate.filtered_buf = NULL;
	}

	rar->cstate.write_ptr = 0;
	rar->cstate.last_write_ptr = 0;

	memset(&rar->cstate.bd, 0, sizeof(rar->cstate.bd));
	memset(&rar->cstate.ld, 0, sizeof(rar->cstate.ld));
	memset(&rar->cstate.dd, 0, sizeof(rar->cstate.dd));
	memset(&rar->cstate.ldd, 0, sizeof(rar->cstate.ldd));
	memset(&rar->cstate.rd, 0, sizeof(rar->cstate.rd));
}

/* Folds `to_read` bytes at `p` into the running CRC32 and (if present)
 * BLAKE2sp state for the current file. Checksumming is skipped in
 * skip-mode unless CHECK_CRC_ON_SOLID_SKIP is defined. */
static void update_crc(struct rar5* rar, const uint8_t* p, size_t to_read) {
	int verify_crc;

	if(rar->skip_mode) {
#if defined CHECK_CRC_ON_SOLID_SKIP
		verify_crc = 1;
#else
		verify_crc = 0;
#endif
	} else
		verify_crc = 1;

	if(verify_crc) {
		/* Don't update CRC32 if the file doesn't have the
		 * `stored_crc32` info filled in. */
		if(rar->file.stored_crc32 > 0) {
			rar->file.calculated_crc32 =
				crc32(rar->file.calculated_crc32, p, to_read);
		}

		/* Check if the file uses an optional BLAKE2sp checksum
		 * algorithm.
*/
		if(rar->file.has_blake2 > 0) {
			/* Return value of the `update` function is always 0,
			 * so we can explicitly ignore it here. */
			(void) blake2sp_update(&rar->file.b2state, p, to_read);
		}
	}
}

/* Builds the canonical Huffman decode table (`table`) for `size` symbols
 * from the per-symbol code lengths in `bit_length`.  Also fills the
 * quick-lookup arrays used for short codes. */
static int create_decode_tables(uint8_t* bit_length,
    struct decode_table* table, int size)
{
	int code, upper_limit = 0, i, lc[16];
	uint32_t decode_pos_clone[rar5_countof(table->decode_pos)];
	ssize_t cur_len, quick_data_size;

	memset(&lc, 0, sizeof(lc));
	memset(table->decode_num, 0, sizeof(table->decode_num));
	table->size = size;
	/* Literal table uses a wider quick-lookup window (10 bits vs 7). */
	table->quick_bits = size == HUFF_NC ? 10 : 7;

	/* Count how many symbols use each code length (lengths 0..15). */
	for(i = 0; i < size; i++) {
		lc[bit_length[i] & 15]++;
	}

	lc[0] = 0;
	table->decode_pos[0] = 0;
	table->decode_len[0] = 0;

	/* Compute first-code boundaries and starting positions per length. */
	for(i = 1; i < 16; i++) {
		upper_limit += lc[i];

		table->decode_len[i] = upper_limit << (16 - i);
		table->decode_pos[i] = table->decode_pos[i - 1] + lc[i - 1];

		upper_limit <<= 1;
	}

	memcpy(decode_pos_clone, table->decode_pos, sizeof(decode_pos_clone));

	/* Assign symbol numbers to table slots in canonical order. */
	for(i = 0; i < size; i++) {
		uint8_t clen = bit_length[i] & 15;
		if(clen > 0) {
			int last_pos = decode_pos_clone[clen];
			table->decode_num[last_pos] = i;
			decode_pos_clone[clen]++;
		}
	}

	/* Precompute the quick lookup arrays for all possible short
	 * bit prefixes. */
	quick_data_size = (int64_t)1 << table->quick_bits;
	cur_len = 1;
	for(code = 0; code < quick_data_size; code++) {
		int bit_field = code << (16 - table->quick_bits);
		int dist, pos;

		while(cur_len < rar5_countof(table->decode_len) &&
				bit_field >= table->decode_len[cur_len]) {
			cur_len++;
		}

		table->quick_len[code] = (uint8_t) cur_len;

		dist = bit_field - table->decode_len[cur_len - 1];
		dist >>= (16 - cur_len);

		pos = table->decode_pos[cur_len & 15] + dist;

		/* Guard against out-of-range positions produced by
		 * degenerate code-length sets in invalid archives. */
		if(cur_len < rar5_countof(table->decode_pos) && pos < size) {
			table->quick_num[code] = table->decode_num[pos];
		} else {
			table->quick_num[code] = 0;
		}
	}

	return ARCHIVE_OK;
}

/* Decodes one Huffman symbol from the bit stream into *num, using the
 * quick-lookup path when the pending bits form a short code. */
static int decode_number(struct archive_read* a, struct decode_table* table,
    const uint8_t* p, uint16_t* num)
{
	int i, bits, dist;
	uint16_t bitfield;
	uint32_t pos;
	struct rar5* rar = get_context(a);

	if(ARCHIVE_OK != read_bits_16(rar, p, &bitfield)) {
		return ARCHIVE_EOF;
	}

	bitfield &= 0xfffe;

	/* Fast path: the code fits into `quick_bits` bits. */
	if(bitfield < table->decode_len[table->quick_bits]) {
		int code = bitfield >> (16 - table->quick_bits);

		skip_bits(rar, table->quick_len[code]);
		*num = table->quick_num[code];

		return ARCHIVE_OK;
	}

	/* Slow path: find the actual code length. */
	bits = 15;

	for(i = table->quick_bits + 1; i < 15; i++) {
		if(bitfield < table->decode_len[i]) {
			bits = i;
			break;
		}
	}

	skip_bits(rar, bits);

	dist = bitfield - table->decode_len[bits - 1];
	dist >>= (16 - bits);
	pos = table->decode_pos[bits] + dist;

	/* Clamp invalid positions (corrupted input) to slot 0. */
	if(pos >= table->size)
		pos = 0;

	*num = table->decode_num[pos];
	return ARCHIVE_OK;
}

/* Reads and parses Huffman tables from the beginning of the block. */
static int parse_tables(struct archive_read* a, struct rar5* rar,
    const uint8_t* p)
{
	int ret, value, i, w, idx = 0;
	uint8_t bit_length[HUFF_BC],
		table[HUFF_TABLE_SIZE],
		nibble_mask = 0xF0,
		nibble_shift = 4;

	enum { ESCAPE = 15 };

	/* The data for table generation is compressed using a simple RLE-like
	 * algorithm when storing zeroes, so we need to unpack it first. */
	for(w = 0, i = 0; w < HUFF_BC;) {
		if(i >= rar->cstate.cur_block_size) {
			/* Truncated data, can't continue. */
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Truncated data in huffman tables");
			return ARCHIVE_FATAL;
		}

		/* Consume the input one nibble (4 bits) at a time. */
		value = (p[i] & nibble_mask) >> nibble_shift;

		if(nibble_mask == 0x0F)
			++i;

		nibble_mask ^= 0xFF;
		nibble_shift ^= 4;

		/* Values smaller than 15 is data, so we write it directly.
		 * Value 15 is a flag telling us that we need to unpack more
		 * bytes. */
		if(value == ESCAPE) {
			value = (p[i] & nibble_mask) >> nibble_shift;
			if(nibble_mask == 0x0F)
				++i;
			nibble_mask ^= 0xFF;
			nibble_shift ^= 4;

			if(value == 0) {
				/* We sometimes need to write the actual value
				 * of 15, so this case handles that. */
				bit_length[w++] = ESCAPE;
			} else {
				int k;

				/* Fill zeroes.
*/
				for(k = 0;
				    (k < value + 2) && (w < HUFF_BC);
				    k++) {
					bit_length[w++] = 0;
				}
			}
		} else {
			bit_length[w++] = value;
		}
	}

	/* Re-sync the bit reader to where the nibble scan stopped. */
	rar->bits.in_addr = i;
	rar->bits.bit_addr = nibble_shift ^ 4;

	/* `bd` decodes the (meta) table used to decompress the real ones. */
	ret = create_decode_tables(bit_length, &rar->cstate.bd, HUFF_BC);
	if(ret != ARCHIVE_OK) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Decoding huffman tables failed");
		return ARCHIVE_FATAL;
	}

	/* Decode the combined literal/distance/low-distance/repeat table. */
	for(i = 0; i < HUFF_TABLE_SIZE;) {
		uint16_t num;

		if((rar->bits.in_addr + 6) >= rar->cstate.cur_block_size) {
			/* Truncated data, can't continue. */
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Truncated data in huffman tables (#2)");
			return ARCHIVE_FATAL;
		}

		ret = decode_number(a, &rar->cstate.bd, p, &num);
		if(ret != ARCHIVE_OK) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Decoding huffman tables failed");
			return ARCHIVE_FATAL;
		}

		if(num < 16) {
			/* 0..15: store directly */
			table[i] = (uint8_t) num;
			i++;
		} else if(num < 18) {
			/* 16..17: repeat previous code */
			uint16_t n;

			if(ARCHIVE_OK != read_bits_16(rar, p, &n))
				return ARCHIVE_EOF;

			if(num == 16) {
				n >>= 13;
				n += 3;
				skip_bits(rar, 3);
			} else {
				n >>= 9;
				n += 11;
				skip_bits(rar, 7);
			}

			/* Repeating requires a previous entry to copy. */
			if(i > 0) {
				while(n-- > 0 && i < HUFF_TABLE_SIZE) {
					table[i] = table[i - 1];
					i++;
				}
			} else {
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_FILE_FORMAT,
				    "Unexpected error when decoding "
				    "huffman tables");
				return ARCHIVE_FATAL;
			}
		} else {
			/* other codes: fill with zeroes `n` times */
			uint16_t n;

			if(ARCHIVE_OK != read_bits_16(rar, p, &n))
				return ARCHIVE_EOF;

			if(num == 18) {
				n >>= 13;
				n += 3;
				skip_bits(rar, 3);
			} else {
				n >>= 9;
				n += 11;
				skip_bits(rar, 7);
			}

			while(n-- > 0 && i < HUFF_TABLE_SIZE)
				table[i++] = 0;
		}
	}

	/* Split the combined table into the four real decode tables. */
	ret = create_decode_tables(&table[idx], &rar->cstate.ld, HUFF_NC);
	if(ret != ARCHIVE_OK) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Failed to create literal table");
		return ARCHIVE_FATAL;
	}

	idx += HUFF_NC;

	ret = create_decode_tables(&table[idx], &rar->cstate.dd, HUFF_DC);
	if(ret != ARCHIVE_OK) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Failed to create distance table");
		return ARCHIVE_FATAL;
	}

	idx += HUFF_DC;

	ret = create_decode_tables(&table[idx], &rar->cstate.ldd, HUFF_LDC);
	if(ret != ARCHIVE_OK) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Failed to create lower bits of distances table");
		return ARCHIVE_FATAL;
	}

	idx += HUFF_LDC;

	ret = create_decode_tables(&table[idx], &rar->cstate.rd, HUFF_RC);
	if(ret != ARCHIVE_OK) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Failed to create repeating distances table");
		return ARCHIVE_FATAL;
	}

	return ARCHIVE_OK;
}

/* Parses the block header, verifies its CRC byte, and saves the header
 * fields inside the `hdr` pointer. */
static int parse_block_header(struct archive_read* a, const uint8_t* p,
    ssize_t* block_size, struct compressed_block_header* hdr)
{
	uint8_t calculated_cksum;

	memcpy(hdr, p, sizeof(struct compressed_block_header));

	if(bf_byte_count(hdr) > 2) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Unsupported block header size (was %d, max is 2)",
		    bf_byte_count(hdr));
		return ARCHIVE_FATAL;
	}

	/* This should probably use bit reader interface in order to be more
	 * future-proof. */
	*block_size = 0;
	switch(bf_byte_count(hdr)) {
		/* 1-byte block size */
		case 0:
			*block_size = *(const uint8_t*) &p[2];
			break;

		/* 2-byte block size */
		case 1:
			*block_size = archive_le16dec(&p[2]);
			break;

		/* 3-byte block size */
		case 2:
			*block_size = archive_le32dec(&p[2]);
			*block_size &= 0x00FFFFFF;
			break;

		/* Other block sizes are not supported. This case is not
		 * reached, because we have an 'if' guard before the switch
		 * that makes sure of it. */
		default:
			return ARCHIVE_FATAL;
	}

	/* Verify the block header checksum. 0x5A is a magic value and is
	 * always constant.
*/
	calculated_cksum = 0x5A
	    ^ (uint8_t) hdr->block_flags_u8
	    ^ (uint8_t) *block_size
	    ^ (uint8_t) (*block_size >> 8)
	    ^ (uint8_t) (*block_size >> 16);

	if(calculated_cksum != hdr->block_cksum) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Block checksum error: got 0x%x, expected 0x%x",
		    hdr->block_cksum, calculated_cksum);
		return ARCHIVE_FATAL;
	}

	return ARCHIVE_OK;
}

/* Convenience function used during filter processing: reads a variable
 * length (1-4 byte) little-endian integer from the bit stream. */
static int parse_filter_data(struct rar5* rar, const uint8_t* p,
    uint32_t* filter_data)
{
	int i, bytes;
	uint32_t data = 0;

	/* 2 bits encode (byte count - 1). */
	if(ARCHIVE_OK != read_consume_bits(rar, p, 2, &bytes))
		return ARCHIVE_EOF;

	bytes++;

	for(i = 0; i < bytes; i++) {
		uint16_t byte;

		if(ARCHIVE_OK != read_bits_16(rar, p, &byte)) {
			return ARCHIVE_EOF;
		}

		/* Cast to uint32_t will ensure the shift operation will not
		 * produce undefined result. */
		data += ((uint32_t) byte >> 8) << (i * 8);
		skip_bits(rar, 8);
	}

	*filter_data = data;
	return ARCHIVE_OK;
}

/* Function is used during sanity checking: a new filter's start offset
 * must not fall inside the previously declared filter's block. */
static int is_valid_filter_block_start(struct rar5* rar,
    uint32_t start)
{
	const int64_t block_start = (ssize_t) start + rar->cstate.write_ptr;
	const int64_t last_bs = rar->cstate.last_block_start;
	const ssize_t last_bl = rar->cstate.last_block_length;

	if(last_bs == 0 || last_bl == 0) {
		/* We didn't have any filters yet, so accept this offset. */
		return 1;
	}

	if(block_start >= last_bs + last_bl) {
		/* Current offset is bigger than last block's end offset, so
		 * accept current offset. */
		return 1;
	}

	/* Any other case is not a normal situation and we should fail. */
	return 0;
}

/* The function will create a new filter, read its parameters from the input
 * stream and add it to the filter collection. */
static int parse_filter(struct archive_read* ar, const uint8_t* p) {
	uint32_t block_start, block_length;
	uint16_t filter_type;
	struct filter_info* filt = NULL;
	struct rar5* rar = get_context(ar);

	/* Read the parameters from the input stream. */
	if(ARCHIVE_OK != parse_filter_data(rar, p, &block_start))
		return ARCHIVE_EOF;

	if(ARCHIVE_OK != parse_filter_data(rar, p, &block_length))
		return ARCHIVE_EOF;

	if(ARCHIVE_OK != read_bits_16(rar, p, &filter_type))
		return ARCHIVE_EOF;

	filter_type >>= 13;
	skip_bits(rar, 3);

	/* Perform some sanity checks on this filter parameters. Note that we
	 * allow only DELTA, E8/E9 and ARM filters here, because rest of
	 * filters are not used in RARv5. */
	if(block_length < 4 || block_length > 0x400000 ||
	    filter_type > FILTER_ARM ||
	    !is_valid_filter_block_start(rar, block_start))
	{
		archive_set_error(&ar->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Invalid filter encountered");
		return ARCHIVE_FATAL;
	}

	/* Allocate a new filter. */
	filt = add_new_filter(rar);
	if(filt == NULL) {
		archive_set_error(&ar->archive, ENOMEM,
		    "Can't allocate memory for a filter descriptor.");
		return ARCHIVE_FATAL;
	}

	filt->type = filter_type;
	filt->block_start = rar->cstate.write_ptr + block_start;
	filt->block_length = block_length;

	rar->cstate.last_block_start = filt->block_start;
	rar->cstate.last_block_length = filt->block_length;

	/* Read some more data in case this is a DELTA filter. Other filter
	 * types don't require any additional data over what was already
	 * read. */
	if(filter_type == FILTER_DELTA) {
		int channels;

		if(ARCHIVE_OK != read_consume_bits(rar, p, 5, &channels))
			return ARCHIVE_EOF;

		filt->channels = channels + 1;
	}

	return ARCHIVE_OK;
}

/* Expands a length slot `code` into the actual match length.  Returns -1
 * when the bit reader runs out of data -- callers must check for it. */
static int decode_code_length(struct rar5* rar, const uint8_t* p,
    uint16_t code)
{
	int lbits, length = 2;

	if(code < 8) {
		lbits = 0;
		length += code;
	} else {
		lbits = code / 4 - 1;
		length += (4 | (code & 3)) << lbits;
	}

	if(lbits > 0) {
		int add;

		if(ARCHIVE_OK != read_consume_bits(rar, p, lbits, &add))
			return -1;

		length += add;
	}

	return length;
}

/* Copies `len` bytes from `dist` bytes back in the circular window to the
 * current write position (LZ77-style back-reference). */
static int copy_string(struct archive_read* a, int len, int dist) {
	struct rar5* rar = get_context(a);
	const uint64_t cmask = rar->cstate.window_mask;
	const uint64_t write_ptr = rar->cstate.write_ptr +
	    rar->cstate.solid_offset;
	int i;

	/* init_unpack() may have failed to allocate the window. */
	if (rar->cstate.window_buf == NULL)
		return ARCHIVE_FATAL;

	/* The unpacker spends most of the time in this function. It would be
	 * a good idea to introduce some optimizations here.
	 *
	 * Just remember that this loop treats buffers that overlap
	 * differently than buffers that do not overlap. This is why a simple
	 * memcpy(3) call will not be enough. */
	for(i = 0; i < len; i++) {
		const ssize_t write_idx = (write_ptr + i) & cmask;
		const ssize_t read_idx = (write_ptr + i - dist) & cmask;
		rar->cstate.window_buf[write_idx] =
		    rar->cstate.window_buf[read_idx];
	}

	rar->cstate.write_ptr += len;
	return ARCHIVE_OK;
}

/* Decompresses literals from the current block into the circular window
 * until either the block is exhausted or enough output was generated. */
static int do_uncompress_block(struct archive_read* a, const uint8_t* p) {
	struct rar5* rar = get_context(a);
	uint16_t num;
	int ret;

	const uint64_t cmask = rar->cstate.window_mask;
	const struct compressed_block_header* hdr = &rar->last_block_hdr;
	const uint8_t bit_size = 1 + bf_bit_size(hdr);

	while(1) {
		if(rar->cstate.write_ptr - rar->cstate.last_write_ptr >
		    (rar->cstate.window_size >> 1)) {
			/* Don't allow growing data by more than half of the
			 * window size at a time. In such case, break the
			 * loop; next call to this function will continue
			 * processing from this moment.
*/
			break;
		}

		/* Stop when the bit reader has consumed the whole block. */
		if(rar->bits.in_addr > rar->cstate.cur_block_size - 1 ||
		    (rar->bits.in_addr == rar->cstate.cur_block_size - 1 &&
		     rar->bits.bit_addr >= bit_size))
		{
			/* If the program counter is here, it means the
			 * function has finished processing the block. */
			rar->cstate.block_parsing_finished = 1;
			break;
		}

		/* Decode the next literal. */
		if(ARCHIVE_OK != decode_number(a, &rar->cstate.ld, p, &num)) {
			return ARCHIVE_EOF;
		}

		/* Num holds a decompression literal, or 'command code'.
		 *
		 * - Values lower than 256 are just bytes. Those codes
		 *   can be stored in the output buffer directly.
		 *
		 * - Code 256 defines a new filter, which is later used to
		 *   transform the data block accordingly to the filter type.
		 *   The data block needs to be fully uncompressed first.
		 *
		 * - Code bigger than 257 and smaller than 262 define
		 *   a repetition pattern that should be copied from
		 *   an already uncompressed chunk of data. */
		if(num < 256) {
			/* Directly store the byte. */
			int64_t write_idx = rar->cstate.solid_offset +
			    rar->cstate.write_ptr++;

			rar->cstate.window_buf[write_idx & cmask] =
			    (uint8_t) num;

			continue;
		} else if(num >= 262) {
			/* Match with explicitly coded length and distance. */
			uint16_t dist_slot;
			int len = decode_code_length(rar, p, num - 262),
			    dbits,
			    dist = 1;

			/* decode_code_length() returns -1 on truncated
			 * input. */
			if(len == -1) {
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_PROGRAMMER,
				    "Failed to decode the code length");
				return ARCHIVE_FATAL;
			}

			if(ARCHIVE_OK != decode_number(a, &rar->cstate.dd, p,
			    &dist_slot))
			{
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_PROGRAMMER,
				    "Failed to decode the distance slot");
				return ARCHIVE_FATAL;
			}

			if(dist_slot < 4) {
				dbits = 0;
				dist += dist_slot;
			} else {
				dbits = dist_slot / 2 - 1;

				/* Cast to uint32_t will make sure the shift
				 * left operation won't produce undefined
				 * result. Then, the uint32_t type will
				 * be implicitly casted to int.
*/ dist += (uint32_t) (2 | (dist_slot & 1)) << dbits; } if(dbits > 0) { if(dbits >= 4) { uint32_t add = 0; uint16_t low_dist; if(dbits > 4) { if(ARCHIVE_OK != read_bits_32( rar, p, &add)) { /* Return EOF if we * can't read more * data. */ return ARCHIVE_EOF; } skip_bits(rar, dbits - 4); add = (add >> ( 36 - dbits)) << 4; dist += add; } if(ARCHIVE_OK != decode_number(a, &rar->cstate.ldd, p, &low_dist)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER, "Failed to decode the " "distance slot"); return ARCHIVE_FATAL; } if(dist >= INT_MAX - low_dist - 1) { /* This only happens in * invalid archives. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Distance pointer " "overflow"); return ARCHIVE_FATAL; } dist += low_dist; } else { /* dbits is one of [0,1,2,3] */ int add; if(ARCHIVE_OK != read_consume_bits(rar, p, dbits, &add)) { /* Return EOF if we can't read * more data. */ return ARCHIVE_EOF; } dist += add; } } if(dist > 0x100) { len++; if(dist > 0x2000) { len++; if(dist > 0x40000) { len++; } } } dist_cache_push(rar, dist); rar->cstate.last_len = len; if(ARCHIVE_OK != copy_string(a, len, dist)) return ARCHIVE_FATAL; continue; } else if(num == 256) { /* Create a filter. */ ret = parse_filter(a, p); if(ret != ARCHIVE_OK) return ret; continue; } else if(num == 257) { if(rar->cstate.last_len != 0) { if(ARCHIVE_OK != copy_string(a, rar->cstate.last_len, rar->cstate.dist_cache[0])) { return ARCHIVE_FATAL; } } continue; } else { /* num < 262 */ const int idx = num - 258; const int dist = dist_cache_touch(rar, idx); uint16_t len_slot; int len; if(ARCHIVE_OK != decode_number(a, &rar->cstate.rd, p, &len_slot)) { return ARCHIVE_FATAL; } len = decode_code_length(rar, p, len_slot); rar->cstate.last_len = len; if(ARCHIVE_OK != copy_string(a, len, dist)) return ARCHIVE_FATAL; continue; } /* The program counter shouldn't reach here. 
*/
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "Unsupported block code: 0x%x", num);
		return ARCHIVE_FATAL;
	}

	return ARCHIVE_OK;
}

/* Binary search for the RARv5 signature. */
static int scan_for_signature(struct archive_read* a) {
	const uint8_t* p;
	const int chunk_size = 512;
	ssize_t i;

	/* If we're here, it means we're on an 'unknown territory' data.
	 * There's no indication what kind of data we're reading here.
	 * It could be some text comment, any kind of binary data,
	 * digital sign, dragons, etc.
	 *
	 * We want to find a valid RARv5 magic header inside this unknown
	 * data. */

	/* Is it possible in libarchive to just skip everything until the
	 * end of the file? If so, it would be a better approach than the
	 * current implementation of this function. */

	/* NOTE(review): the inner scan stops `rar5_signature_size` bytes
	 * short of the chunk end, but the whole chunk is consumed, so a
	 * signature straddling a 512-byte chunk boundary would be missed --
	 * confirm whether that case can occur in practice before changing
	 * the consume amount. */
	while(1) {
		if(!read_ahead(a, chunk_size, &p))
			return ARCHIVE_EOF;

		for(i = 0; i < chunk_size - rar5_signature_size; i++) {
			if(memcmp(&p[i], rar5_signature,
			    rar5_signature_size) == 0) {
				/* Consume the number of bytes we've used to
				 * search for the signature, as well as the
				 * number of bytes used by the signature
				 * itself. After this we should be standing
				 * on a valid base block header. */
				(void) consume(a, i + rar5_signature_size);
				return ARCHIVE_OK;
			}
		}

		consume(a, chunk_size);
	}

	return ARCHIVE_FATAL;
}

/* This function will switch the multivolume archive file to another file,
 * i.e. from part03 to part 04. */
static int advance_multivolume(struct archive_read* a) {
	int lret;
	struct rar5* rar = get_context(a);

	/* A small state machine that will skip unnecessary data, needed to
	 * switch from one multivolume to another. Such skipping is needed if
	 * we want to be an stream-oriented (instead of file-oriented)
	 * unpacker.
	 *
	 * The state machine starts with `rar->main.endarc` == 0. It also
	 * assumes that current stream pointer points to some base block
	 * header.
	 *
	 * The `endarc` field is being set when the base block parsing
	 * function encounters the 'end of archive' marker.
*/
	while(1) {
		if(rar->main.endarc == 1) {
			/* End-of-archive seen: skip trailing headers until
			 * the next volume's first useful block. */
			int looping = 1;

			rar->main.endarc = 0;

			while(looping) {
				lret = skip_base_block(a);

				switch(lret) {
					case ARCHIVE_RETRY:
						/* Continue looping. */
						break;
					case ARCHIVE_OK:
						/* Break loop. */
						looping = 0;
						break;
					default:
						/* Forward any errors to the
						 * caller. */
						return lret;
				}
			}

			break;
		} else {
			/* Skip current base block. In order to properly skip
			 * it, we really need to simply parse it and discard
			 * the results. */
			lret = skip_base_block(a);
			if(lret == ARCHIVE_FATAL || lret == ARCHIVE_FAILED)
				return lret;

			/* The `skip_base_block` function tells us if we
			 * should continue with skipping, or we should stop
			 * skipping. We're trying to skip everything up to
			 * a base FILE block. */
			if(lret != ARCHIVE_RETRY) {
				/* If there was an error during skipping, or
				 * we have just skipped a FILE base block... */
				if(rar->main.endarc == 0) {
					return lret;
				} else {
					continue;
				}
			}
		}
	}

	return ARCHIVE_OK;
}

/* Merges the partial block from the first multivolume archive file, and
 * partial block from the second multivolume archive file. The result is
 * a chunk of memory containing the whole block, and the stream pointer
 * is advanced to the next block in the second multivolume archive file. */
static int merge_block(struct archive_read* a, ssize_t block_size,
    const uint8_t** p)
{
	struct rar5* rar = get_context(a);
	ssize_t cur_block_size, partial_offset = 0;
	const uint8_t* lp;
	int ret;

	/* Guard against re-entering through advance_multivolume() below. */
	if(rar->merge_mode) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "Recursive merge is not allowed");
		return ARCHIVE_FATAL;
	}

	/* Set a flag that we're in the switching mode. */
	rar->cstate.switch_multivolume = 1;

	/* Reallocate the memory which will hold the whole block. */
	if(rar->vol.push_buf)
		free((void*) rar->vol.push_buf);

	/* Increasing the allocation block by 8 is due to bit reading
	 * functions, which are using additional 2 or 4 bytes. Allocating the
	 * block size by exact value would make bit reader perform reads from
	 * invalid memory block when reading the last byte from the buffer. */
	rar->vol.push_buf = malloc(block_size + 8);
	if(!rar->vol.push_buf) {
		archive_set_error(&a->archive, ENOMEM,
		    "Can't allocate memory for a merge block buffer.");
		return ARCHIVE_FATAL;
	}

	/* Valgrind complains if the extension block for bit reader is not
	 * initialized, so initialize it. */
	memset(&rar->vol.push_buf[block_size], 0, 8);

	/* A single block can span across multiple multivolume archive files,
	 * so we use a loop here. This loop will consume enough multivolume
	 * archive files until the whole block is read. */
	while(1) {
		/* Get the size of current block chunk in this multivolume
		 * archive file and read it. */
		cur_block_size = rar5_min(rar->file.bytes_remaining,
		    block_size - partial_offset);

		if(cur_block_size == 0) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Encountered block size == 0 during block merge");
			return ARCHIVE_FATAL;
		}

		if(!read_ahead(a, cur_block_size, &lp))
			return ARCHIVE_EOF;

		/* Sanity check; there should never be a situation where this
		 * function reads more data than the block's size. */
		if(partial_offset + cur_block_size > block_size) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_PROGRAMMER,
			    "Consumed too much data when merging blocks.");
			return ARCHIVE_FATAL;
		}

		/* Merge previous block chunk with current block chunk,
		 * or create first block chunk if this is our first
		 * iteration. */
		memcpy(&rar->vol.push_buf[partial_offset], lp,
		    cur_block_size);

		/* Advance the stream read pointer by this block chunk
		 * size. */
		if(ARCHIVE_OK != consume(a, cur_block_size))
			return ARCHIVE_EOF;

		/* Update the pointers. `partial_offset` contains information
		 * about the sum of merged block chunks. */
		partial_offset += cur_block_size;
		rar->file.bytes_remaining -= cur_block_size;

		/* If `partial_offset` is the same as `block_size`, this means
		 * we've merged all block chunks and we have a valid full
		 * block. */
		if(partial_offset == block_size) {
			break;
		}

		/* If we don't have any bytes to read, this means we should
		 * switch to another multivolume archive file. */
		if(rar->file.bytes_remaining == 0) {
			rar->merge_mode++;
			ret = advance_multivolume(a);
			rar->merge_mode--;
			if(ret != ARCHIVE_OK) {
				return ret;
			}
		}
	}

	*p = rar->vol.push_buf;

	/* If we're here, we can resume unpacking by processing the block
	 * pointed to by the `*p` memory pointer. */
	return ARCHIVE_OK;
}

/* Reads, validates and decompresses (part of) the next compressed block,
 * handling blocks that span multiple volume files. */
static int process_block(struct archive_read* a) {
	const uint8_t* p;
	struct rar5* rar = get_context(a);
	int ret;

	/* If we don't have any data to be processed, this most probably means
	 * we need to switch to the next volume. */
	if(rar->main.volume && rar->file.bytes_remaining == 0) {
		ret = advance_multivolume(a);
		if(ret != ARCHIVE_OK)
			return ret;
	}

	if(rar->cstate.block_parsing_finished) {
		ssize_t block_size;
		ssize_t to_skip;
		ssize_t cur_block_size;

		/* The header size won't be bigger than 6 bytes. */
		if(!read_ahead(a, 6, &p)) {
			/* Failed to prefetch data block header. */
			return ARCHIVE_EOF;
		}

		/*
		 * Read block_size by parsing block header. Validate the
		 * header by calculating CRC byte stored inside the header.
		 * Size of the header is not constant (block size can be
		 * stored either in 1 or 2 bytes), that's why block size is
		 * left out from the `compressed_block_header` structure and
		 * returned by `parse_block_header` as the second argument.
		 */
		ret = parse_block_header(a, p, &block_size,
		    &rar->last_block_hdr);
		if(ret != ARCHIVE_OK) {
			return ret;
		}

		/* Skip block header. Next data is huffman tables,
		 * if present.
*/
		to_skip = sizeof(struct compressed_block_header) +
			bf_byte_count(&rar->last_block_hdr) + 1;

		if(ARCHIVE_OK != consume(a, to_skip))
			return ARCHIVE_EOF;

		rar->file.bytes_remaining -= to_skip;

		/* The block size gives information about the whole block
		 * size, but the block could be stored in split form when
		 * using multi-volume archives. In this case, the block size
		 * will be bigger than the actual data stored in this file.
		 * Remaining part of the data will be in another file. */
		cur_block_size = rar5_min(rar->file.bytes_remaining,
		    block_size);

		if(block_size > rar->file.bytes_remaining) {
			/* If current blocks' size is bigger than our data
			 * size, this means we have a multivolume archive.
			 * In this case, skip all base headers until the end
			 * of the file, proceed to next "partXXX.rar" volume,
			 * find its signature, skip all headers up to the
			 * first FILE base header, and continue from there.
			 *
			 * Note that `merge_block` will update the `rar`
			 * context structure quite extensively. */
			ret = merge_block(a, block_size, &p);
			if(ret != ARCHIVE_OK) {
				return ret;
			}

			cur_block_size = block_size;

			/* Current stream pointer should be now directly
			 * *after* the block that spanned through multiple
			 * archive files. `p` pointer should have the data of
			 * the *whole* block (merged from partial blocks
			 * stored in multiple archives files). */
		} else {
			rar->cstate.switch_multivolume = 0;

			/* Read the whole block size into memory. This can
			 * take up to 8 megabytes of memory in theoretical
			 * cases. Might be worth to optimize this and use a
			 * standard chunk of 4kb's. */
			if(!read_ahead(a, 4 + cur_block_size, &p)) {
				/* Failed to prefetch block data. */
				return ARCHIVE_EOF;
			}
		}

		rar->cstate.block_buf = p;
		rar->cstate.cur_block_size = cur_block_size;
		rar->cstate.block_parsing_finished = 0;

		rar->bits.in_addr = 0;
		rar->bits.bit_addr = 0;

		if(bf_is_table_present(&rar->last_block_hdr)) {
			/* Load Huffman tables. */
			ret = parse_tables(a, rar, p);
			if(ret != ARCHIVE_OK) {
				/* Error during decompression of Huffman
				 * tables. */
				return ret;
			}
		}
	} else {
		/* Block parsing not finished, reuse previous memory
		 * buffer. */
		p = rar->cstate.block_buf;
	}

	/* Uncompress the block, or a part of it, depending on how many bytes
	 * will be generated by uncompressing the block.
	 *
	 * In case too many bytes will be generated, calling this function
	 * again will resume the uncompression operation. */
	ret = do_uncompress_block(a, p);
	if(ret != ARCHIVE_OK) {
		return ret;
	}

	if(rar->cstate.block_parsing_finished &&
	    rar->cstate.switch_multivolume == 0 &&
	    rar->cstate.cur_block_size > 0)
	{
		/* If we're processing a normal block, consume the whole
		 * block. We can do this because we've already read the whole
		 * block to memory. */
		if(ARCHIVE_OK != consume(a, rar->cstate.cur_block_size))
			return ARCHIVE_FATAL;

		rar->file.bytes_remaining -= rar->cstate.cur_block_size;
	} else if(rar->cstate.switch_multivolume) {
		/* Don't consume the block if we're doing multivolume
		 * processing. The volume switching function will consume
		 * the proper count of bytes instead. */
		rar->cstate.switch_multivolume = 0;
	}

	return ARCHIVE_OK;
}

/* Pops the `buf`, `size` and `offset` from the "data ready" stack.
 *
 * Returns ARCHIVE_OK when those arguments can be used, ARCHIVE_RETRY
 * when there is no data on the stack. */
static int use_data(struct rar5* rar, const void** buf, size_t* size,
    int64_t* offset)
{
	int i;

	for(i = 0; i < rar5_countof(rar->cstate.dready); i++) {
		struct data_ready *d = &rar->cstate.dready[i];

		if(d->used) {
			/* All three output pointers are optional. */
			if(buf)    *buf = d->buf;
			if(size)   *size = d->size;
			if(offset) *offset = d->offset;

			d->used = 0;
			return ARCHIVE_OK;
		}
	}

	return ARCHIVE_RETRY;
}

/* Pushes the `buf`, `size` and `offset` arguments to the
 * rar->cstate.dready FIFO stack. Those values will be popped from this
 * stack by the `use_data` function. */
static int push_data_ready(struct archive_read* a, struct rar5* rar,
    const uint8_t* buf, size_t size, int64_t offset)
{
	int i;

	/* Don't push if we're in skip mode. This is needed because solid
	 * streams need full processing even if we're skipping data. After
	 * fully processing the stream, we need to discard the generated
	 * bytes, because we're interested only in the side effect: building
	 * up the internal window circular buffer. This window buffer will be
	 * used later during unpacking of requested data. */
	if(rar->skip_mode)
		return ARCHIVE_OK;

	/* Sanity check. */
	if(offset != rar->file.last_offset + rar->file.last_size) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
		    "Sanity check error: output stream is not continuous");
		return ARCHIVE_FATAL;
	}

	for(i = 0; i < rar5_countof(rar->cstate.dready); i++) {
		struct data_ready* d = &rar->cstate.dready[i];
		if(!d->used) {
			d->used = 1;
			d->buf = buf;
			d->size = size;
			d->offset = offset;

			/* These fields are used only in sanity checking. */
			rar->file.last_offset = offset;
			rar->file.last_size = size;

			/* Calculate the checksum of this new block before
			 * submitting data to libarchive's engine. */
			update_crc(rar, d->buf, d->size);

			return ARCHIVE_OK;
		}
	}

	/* Program counter will reach this code if the
	 * `rar->cstate.data_ready` stack will be filled up so that no new
	 * entries will be allowed. The code shouldn't allow such situation
	 * to occur. So we treat this case as an internal error. */
	archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER,
	    "Error: premature end of data_ready stack");
	return ARCHIVE_FATAL;
}

/* This function uncompresses the data that is stored in the <FILE> base
 * block.
 *
 * The FILE base block looks like this:
 *
 * <header><huffman tables><block_1><block_2>...<block_n>
 *
 * The <header> is a block header, that is parsed in parse_block_header().
 * It's a "compressed_block_header" structure, containing metadata needed
 * to know when we should stop looking for more <block_n> blocks.
 *
 * <huffman tables> contain data needed to set up the huffman tables, needed
 * for the actual decompression.
 *
 * Each <block_n> consists of series of literals:
 *
 * <literal><literal><literal>...<literal>
 *
 * Those literals generate the uncompression data. They operate on a
 * circular buffer, sometimes writing raw data into it, sometimes
 * referencing some previous data inside this buffer, and sometimes
 * declaring a filter that will need to be executed on the data stored in
 * the circular buffer. It all depends on the literal that is used.
 *
 * Sometimes blocks produce output data, sometimes they don't. For example,
 * for some huge files that use lots of filters, sometimes a block is filled
 * with only filter declaration literals. Such blocks won't produce any data
 * in the circular buffer.
 *
 * Sometimes blocks will produce 4 bytes of data, and sometimes 1 megabyte,
 * because a literal can reference previously decompressed data. For
 * example, there can be a literal that says: 'append a byte 0xFE here', and
 * after it another literal can say 'append 1 megabyte of data from circular
 * buffer offset 0x12345'. This is how RAR format handles compressing
 * repeated patterns.
 *
 * The RAR compressor creates those literals and the actual efficiency of
 * compression depends on what those literals are. The literals can also
 * be seen as a kind of a non-turing-complete virtual machine that simply
 * tells the decompressor what it should do.
 */
static int do_uncompress_file(struct archive_read* a) {
	struct rar5* rar = get_context(a);
	int ret;
	int64_t max_end_pos;

	if(!rar->cstate.initialized) {
		/* Don't perform full context reinitialization if we're
		 * processing a solid archive. */
		if(!rar->main.solid || !rar->cstate.window_buf) {
			init_unpack(rar);
		}

		rar->cstate.initialized = 1;
	}

	if(rar->cstate.all_filters_applied == 1) {
		/* We use while(1) here, but standard case allows for just
		 * one iteration. The loop will iterate if process_block()
		 * didn't generate any data at all. This can happen if the
		 * block contains only filter definitions (this is common in
		 * big files). */
		while(1) {
			ret = process_block(a);
			if(ret == ARCHIVE_EOF || ret == ARCHIVE_FATAL)
				return ret;

			if(rar->cstate.last_write_ptr ==
			    rar->cstate.write_ptr) {
				/* The block didn't generate any new data,
				 * so just process a new block. */
				continue;
			}

			/* The block has generated some new data, so break
			 * the loop. */
			break;
		}
	}

	/* Try to run filters. If filters won't be applied, it means that
	 * insufficient data was generated. */
	ret = apply_filters(a);
	if(ret == ARCHIVE_RETRY) {
		return ARCHIVE_OK;
	} else if(ret == ARCHIVE_FATAL) {
		return ARCHIVE_FATAL;
	}

	/* If apply_filters() will return ARCHIVE_OK, we can continue here. */

	if(cdeque_size(&rar->cstate.filters) > 0) {
		/* Check if we can write something before hitting first
		 * filter. */
		struct filter_info* flt;

		/* Get the block_start offset from the first filter. */
		if(CDE_OK != cdeque_front(&rar->cstate.filters,
		    cdeque_filter_p(&flt)))
		{
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_PROGRAMMER,
			    "Can't read first filter");
			return ARCHIVE_FATAL;
		}

		max_end_pos = rar5_min(flt->block_start,
		    rar->cstate.write_ptr);
	} else {
		/* There are no filters defined, or all filters were applied.
		 * This means we can just store the data without any
		 * postprocessing. */
		max_end_pos = rar->cstate.write_ptr;
	}

	if(max_end_pos == rar->cstate.last_write_ptr) {
		/* We can't write anything yet. The block uncompression
		 * function did not generate enough data, and no filter can
		 * be applied. At the same time we don't have any data that
		 * can be stored without filter postprocessing. This means we
		 * need to wait for more data to be generated, so we can
		 * apply the filters.
		 *
		 * Signal the caller that we need more data to be able to do
		 * anything. */
		return ARCHIVE_RETRY;
	} else {
		/* We can write the data before hitting the first filter.
		 * So let's do it. The push_window_data() function will
		 * effectively return the selected data block to the user
		 * application. */
		push_window_data(a, rar, rar->cstate.last_write_ptr,
		    max_end_pos);
		rar->cstate.last_write_ptr = max_end_pos;
	}

	return ARCHIVE_OK;
}

/* Drives do_uncompress_file() until it stops asking for a retry. */
static int uncompress_file(struct archive_read* a) {
	int ret;

	while(1) {
		/* Sometimes the uncompression function will return a
		 * 'retry' signal. If this will happen, we have to retry
		 * the function. */
		ret = do_uncompress_file(a);
		if(ret != ARCHIVE_RETRY)
			return ret;
	}
}

/* Returns up to 64 KiB of STOREd (uncompressed) file data directly from
 * the input stream, switching volumes when the current one is drained. */
static int do_unstore_file(struct archive_read* a,
    struct rar5* rar, const void** buf, size_t* size, int64_t* offset)
{
	size_t to_read;
	const uint8_t* p;

	if(rar->file.bytes_remaining == 0 && rar->main.volume > 0 &&
	    rar->generic.split_after > 0)
	{
		int ret;

		rar->cstate.switch_multivolume = 1;
		ret = advance_multivolume(a);
		rar->cstate.switch_multivolume = 0;

		if(ret != ARCHIVE_OK) {
			/* Failed to advance to next multivolume archive
			 * file. */
			return ret;
		}
	}

	to_read = rar5_min(rar->file.bytes_remaining, 64 * 1024);
	if(to_read == 0) {
		return ARCHIVE_EOF;
	}

	if(!read_ahead(a, to_read, &p)) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
		    "I/O error when unstoring file");
		return ARCHIVE_FATAL;
	}

	if(ARCHIVE_OK != consume(a, to_read)) {
		return ARCHIVE_EOF;
	}

	/* All three output pointers are optional. */
	if(buf)    *buf = p;
	if(size)   *size = to_read;
	if(offset) *offset = rar->cstate.last_unstore_ptr;

	rar->file.bytes_remaining -= to_read;
	rar->cstate.last_unstore_ptr += to_read;

	update_crc(rar, p, to_read);
	return ARCHIVE_OK;
}

/* Dispatches to the proper unpacker based on the entry's compression
 * method.  Service streams are always stored uncompressed. */
static int do_unpack(struct archive_read* a, struct rar5* rar,
    const void** buf, size_t* size, int64_t* offset)
{
	enum COMPRESSION_METHOD {
		STORE = 0, FASTEST = 1, FAST = 2, NORMAL = 3, GOOD = 4,
		BEST = 5
	};

	if(rar->file.service > 0) {
		return do_unstore_file(a, rar, buf, size, offset);
	} else {
		switch(rar->cstate.method) {
			case STORE:
				return do_unstore_file(a, rar, buf, size,
				    offset);
			case FASTEST:
				/* fallthrough */
			case FAST:
				/* fallthrough */
			case NORMAL:
				/* fallthrough */
			case GOOD:
				/* fallthrough */
			case BEST:
				return uncompress_file(a);
			default:
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_FILE_FORMAT,
				    "Compression method not supported: 0x%x",
				    rar->cstate.method);

				return ARCHIVE_FATAL;
		}
	}

#if !defined WIN32
	/* Not reached. */
	return ARCHIVE_OK;
#endif
}

/* Compares the running checksum against the value stored in the archive
 * once a file's data stream has been fully processed.  (Function continues
 * past the end of this view.) */
static int verify_checksums(struct archive_read* a) {
	int verify_crc;
	struct rar5* rar = get_context(a);

	/* Check checksums only when actually unpacking the data. There's no
	 * need to calculate checksum when we're skipping data in solid
	 * archives (skipping in solid archives is the same thing as
	 * unpacking compressed data and discarding the result). */
	if(!rar->skip_mode) {
		/* Always check checksums if we're not in skip mode */
		verify_crc = 1;
	} else {
		/* We can override the logic above with a compile-time option
		 * NO_CRC_ON_SOLID_SKIP. This option is used during debugging,
		 * and it will check checksums of unpacked data even when
		 * we're skipping it. */
#if defined CHECK_CRC_ON_SOLID_SKIP
		/* Debug case */
		verify_crc = 1;
#else
		/* Normal case */
		verify_crc = 0;
#endif
	}

	if(verify_crc) {
		/* During unpacking, on each unpacked block we're calling the
		 * update_crc() function. Since we are here, the unpacking
		 * process is already over and we can check if calculated
		 * checksum (CRC32 or BLAKE2sp) is the same as what is stored
		 * in the archive. */
		if(rar->file.stored_crc32 > 0) {
			/* Check CRC32 only when the file contains a CRC32
			 * value for this file. */
			if(rar->file.calculated_crc32 !=
			    rar->file.stored_crc32) {
				/* Checksums do not match; the unpacked file
				 * is corrupted.
*/ DEBUG_CODE { printf("Checksum error: CRC32 " "(was: %08x, expected: %08x)\n", rar->file.calculated_crc32, rar->file.stored_crc32); } #ifndef DONT_FAIL_ON_CRC_ERROR archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Checksum error: CRC32"); return ARCHIVE_FATAL; #endif } else { DEBUG_CODE { printf("Checksum OK: CRC32 " "(%08x/%08x)\n", rar->file.stored_crc32, rar->file.calculated_crc32); } } } if(rar->file.has_blake2 > 0) { /* BLAKE2sp is an optional checksum algorithm that is * added to RARv5 archives when using the `-htb` switch * during creation of archive. * * We now finalize the hash calculation by calling the * `final` function. This will generate the final hash * value we can use to compare it with the BLAKE2sp * checksum that is stored in the archive. * * The return value of this `final` function is not * very helpful, as it guards only against improper use. * This is why we're explicitly ignoring it. */ uint8_t b2_buf[32]; (void) blake2sp_final(&rar->file.b2state, b2_buf, 32); if(memcmp(&rar->file.blake2sp, b2_buf, 32) != 0) { #ifndef DONT_FAIL_ON_CRC_ERROR archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Checksum error: BLAKE2"); return ARCHIVE_FATAL; #endif } } } /* Finalization for this file has been successfully completed. */ return ARCHIVE_OK; } static int verify_global_checksums(struct archive_read* a) { return verify_checksums(a); } static int rar5_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { int ret; struct rar5* rar = get_context(a); if(rar->file.dir > 0) { /* Don't process any data if this file entry was declared * as a directory. This is needed, because entries marked as * directory doesn't have any dictionary buffer allocated, so * it's impossible to perform any decompression. 
*/ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Can't decompress an entry marked as a directory"); return ARCHIVE_FAILED; } if(!rar->skip_mode && (rar->cstate.last_write_ptr > rar->file.unpacked_size)) { archive_set_error(&a->archive, ARCHIVE_ERRNO_PROGRAMMER, "Unpacker has written too many bytes"); return ARCHIVE_FATAL; } ret = use_data(rar, buff, size, offset); if(ret == ARCHIVE_OK) { return ret; } if(rar->file.eof == 1) { return ARCHIVE_EOF; } ret = do_unpack(a, rar, buff, size, offset); if(ret != ARCHIVE_OK) { return ret; } if(rar->file.bytes_remaining == 0 && rar->cstate.last_write_ptr == rar->file.unpacked_size) { /* If all bytes of current file were processed, run * finalization. * * Finalization will check checksum against proper values. If * some of the checksums will not match, we'll return an error * value in the last `archive_read_data` call to signal an error * to the user. */ rar->file.eof = 1; return verify_global_checksums(a); } return ARCHIVE_OK; } static int rar5_read_data_skip(struct archive_read *a) { struct rar5* rar = get_context(a); if(rar->main.solid) { /* In solid archives, instead of skipping the data, we need to * extract it, and dispose the result. The side effect of this * operation will be setting up the initial window buffer state * needed to be able to extract the selected file. */ int ret; /* Make sure to process all blocks in the compressed stream. */ while(rar->file.bytes_remaining > 0) { /* Setting the "skip mode" will allow us to skip * checksum checks during data skipping. Checking the * checksum of skipped data isn't really necessary and * it's only slowing things down. * * This is incremented instead of setting to 1 because * this data skipping function can be called * recursively. */ rar->skip_mode++; /* We're disposing 1 block of data, so we use triple * NULLs in arguments. */ ret = rar5_read_data(a, NULL, NULL, NULL); /* Turn off "skip mode". 
*/ rar->skip_mode--; if(ret < 0 || ret == ARCHIVE_EOF) { /* Propagate any potential error conditions * to the caller. */ return ret; } } } else { /* In standard archives, we can just jump over the compressed * stream. Each file in non-solid archives starts from an empty * window buffer. */ if(ARCHIVE_OK != consume(a, rar->file.bytes_remaining)) { return ARCHIVE_FATAL; } rar->file.bytes_remaining = 0; } return ARCHIVE_OK; } static int64_t rar5_seek_data(struct archive_read *a, int64_t offset, int whence) { (void) a; (void) offset; (void) whence; /* We're a streaming unpacker, and we don't support seeking. */ return ARCHIVE_FATAL; } static int rar5_cleanup(struct archive_read *a) { struct rar5* rar = get_context(a); free(rar->cstate.window_buf); free(rar->cstate.filtered_buf); free(rar->vol.push_buf); free_filters(rar); cdeque_free(&rar->cstate.filters); free(rar); a->format->data = NULL; return ARCHIVE_OK; } static int rar5_capabilities(struct archive_read * a) { (void) a; return 0; } static int rar5_has_encrypted_entries(struct archive_read *_a) { (void) _a; /* Unsupported for now. */ return ARCHIVE_READ_FORMAT_ENCRYPTION_UNSUPPORTED; } static int rar5_init(struct rar5* rar) { ssize_t i; memset(rar, 0, sizeof(struct rar5)); /* Decrypt the magic signature pattern. Check the comment near the * `rar5_signature` symbol to read the rationale behind this. 
*/ if(rar5_signature[0] == 243) { for(i = 0; i < rar5_signature_size; i++) { rar5_signature[i] ^= 0xA1; } } if(CDE_OK != cdeque_init(&rar->cstate.filters, 8192)) return ARCHIVE_FATAL; return ARCHIVE_OK; } int archive_read_support_format_rar5(struct archive *_a) { struct archive_read* ar; int ret; struct rar5* rar; if(ARCHIVE_OK != (ret = get_archive_read(_a, &ar))) return ret; rar = malloc(sizeof(*rar)); if(rar == NULL) { archive_set_error(&ar->archive, ENOMEM, "Can't allocate rar5 data"); return ARCHIVE_FATAL; } if(ARCHIVE_OK != rar5_init(rar)) { archive_set_error(&ar->archive, ENOMEM, "Can't allocate rar5 filter buffer"); return ARCHIVE_FATAL; } ret = __archive_read_register_format(ar, rar, "rar5", rar5_bid, rar5_options, rar5_read_header, rar5_read_data, rar5_read_data_skip, rar5_seek_data, rar5_cleanup, rar5_capabilities, rar5_has_encrypted_entries); if(ret != ARCHIVE_OK) { (void) rar5_cleanup(ar); } return ret; }
./CrossVul/dataset_final_sorted/CWE-20/c/good_4685_1
crossvul-cpp_data_bad_4829_0
/* * gd_gd2.c * * Implements the I/O and support for the GD2 format. * * Changing the definition of GD2_DBG (below) will cause copious messages * to be displayed while it processes requests. * * Designed, Written & Copyright 1999, Philip Warner. * */ /** * File: GD2 IO * * Read and write GD2 images. * * The GD2 image format is a proprietary image format of libgd. *It has to be* * *regarded as being obsolete, and should only be used for development and* * *testing purposes.* * * Structure of a GD2 image file: * - file header * - chunk headers (only for compressed data) * - color header (either truecolor or palette) * - chunks of image data (chunk-row-major, top to bottom, left to right) * * All numbers are stored in big-endian format. * * File header structure: * signature - 4 bytes (always "gd2\0") * version - 1 word (e.g. "\0\002") * width - 1 word * height - 1 word * chunk_size - 1 word * format - 1 word * x_chunk_count - 1 word * y_chunk_count - 1 word * * Recognized formats: * 1 - raw palette image data * 2 - compressed palette image data * 3 - raw truecolor image data * 4 - compressed truecolor image data * * Chunk header: * offset - 1 dword * size - 1 dword * * There are x_chunk_count * y_chunk_count chunk headers. * * Truecolor image color header: * truecolor - 1 byte (always "\001") * transparent - 1 dword (ARGB color) * * Palette image color header: * truecolor - 1 byte (always "\0") * count - 1 word (the number of used palette colors) * transparent - 1 dword (ARGB color) * palette - 256 dwords (RGBA colors) * * Chunk structure: * Sequential pixel data of a rectangular area (chunk_size x chunk_size), * row-major from top to bottom, left to right: * - 1 byte per pixel for palette images * - 1 dword (ARGB) per pixel for truecolor images * * Depending on format, the chunk may be ZLIB compressed. 
*/ #ifdef HAVE_CONFIG_H #include "config.h" #endif /* 2.0.29: no more errno.h, makes windows happy */ #include <math.h> #include <string.h> #include "gd.h" #include "gd_errors.h" #include "gdhelpers.h" /* 2.03: gd2 is no longer mandatory */ /* JCE - test after including gd.h so that HAVE_LIBZ can be set in * a config.h file included by gd.h */ #ifdef HAVE_LIBZ #include <zlib.h> #define TRUE 1 #define FALSE 0 /* 2.11: not part of the API, as the save routine can figure it out from im->trueColor, and the load routine doesn't need to tell the end user the saved format. NOTE: adding 2 is assumed to result in the correct format value for truecolor! */ #define GD2_FMT_TRUECOLOR_RAW 3 #define GD2_FMT_TRUECOLOR_COMPRESSED 4 #define gd2_compressed(fmt) (((fmt) == GD2_FMT_COMPRESSED) || \ ((fmt) == GD2_FMT_TRUECOLOR_COMPRESSED)) #define gd2_truecolor(fmt) (((fmt) == GD2_FMT_TRUECOLOR_RAW) || \ ((fmt) == GD2_FMT_TRUECOLOR_COMPRESSED)) /* Use this for commenting out debug-print statements. */ /* Just use the first '#define' to allow all the prints... */ /*#define GD2_DBG(s) (s) */ #define GD2_DBG(s) typedef struct { int offset; int size; } t_chunk_info; extern int _gdGetColors (gdIOCtx * in, gdImagePtr im, int gd2xFlag); extern void _gdPutColors (gdImagePtr im, gdIOCtx * out); /* */ /* Read the extra info in the gd2 header. */ /* */ static int _gd2GetHeader (gdIOCtxPtr in, int *sx, int *sy, int *cs, int *vers, int *fmt, int *ncx, int *ncy, t_chunk_info ** chunkIdx) { int i; int ch; char id[5]; t_chunk_info *cidx; int sidx; int nc; GD2_DBG (printf ("Reading gd2 header info\n")); for (i = 0; i < 4; i++) { ch = gdGetC (in); if (ch == EOF) { goto fail1; }; id[i] = ch; }; id[4] = 0; GD2_DBG (printf ("Got file code: %s\n", id)); /* Equiv. of 'magick'. 
*/ if (strcmp (id, GD2_ID) != 0) { GD2_DBG (printf ("Not a valid gd2 file\n")); goto fail1; }; /* Version */ if (gdGetWord (vers, in) != 1) { goto fail1; }; GD2_DBG (printf ("Version: %d\n", *vers)); if ((*vers != 1) && (*vers != 2)) { GD2_DBG (printf ("Bad version: %d\n", *vers)); goto fail1; }; /* Image Size */ if (!gdGetWord (sx, in)) { GD2_DBG (printf ("Could not get x-size\n")); goto fail1; } if (!gdGetWord (sy, in)) { GD2_DBG (printf ("Could not get y-size\n")); goto fail1; } GD2_DBG (printf ("Image is %dx%d\n", *sx, *sy)); /* Chunk Size (pixels, not bytes!) */ if (gdGetWord (cs, in) != 1) { goto fail1; }; GD2_DBG (printf ("ChunkSize: %d\n", *cs)); if ((*cs < GD2_CHUNKSIZE_MIN) || (*cs > GD2_CHUNKSIZE_MAX)) { GD2_DBG (printf ("Bad chunk size: %d\n", *cs)); goto fail1; }; /* Data Format */ if (gdGetWord (fmt, in) != 1) { goto fail1; }; GD2_DBG (printf ("Format: %d\n", *fmt)); if ((*fmt != GD2_FMT_RAW) && (*fmt != GD2_FMT_COMPRESSED) && (*fmt != GD2_FMT_TRUECOLOR_RAW) && (*fmt != GD2_FMT_TRUECOLOR_COMPRESSED)) { GD2_DBG (printf ("Bad data format: %d\n", *fmt)); goto fail1; }; /* # of chunks wide */ if (gdGetWord (ncx, in) != 1) { goto fail1; }; GD2_DBG (printf ("%d Chunks Wide\n", *ncx)); /* # of chunks high */ if (gdGetWord (ncy, in) != 1) { goto fail1; }; GD2_DBG (printf ("%d Chunks vertically\n", *ncy)); if (gd2_compressed (*fmt)) { nc = (*ncx) * (*ncy); GD2_DBG (printf ("Reading %d chunk index entries\n", nc)); if (overflow2(sizeof(t_chunk_info), nc)) { goto fail1; } sidx = sizeof (t_chunk_info) * nc; if (sidx <= 0) { goto fail1; } cidx = gdCalloc (sidx, 1); if (cidx == NULL) { goto fail1; } for (i = 0; i < nc; i++) { if (gdGetInt (&cidx[i].offset, in) != 1) { goto fail2; }; if (gdGetInt (&cidx[i].size, in) != 1) { goto fail2; }; if (cidx[i].offset < 0 || cidx[i].size < 0) goto fail2; }; *chunkIdx = cidx; }; GD2_DBG (printf ("gd2 header complete\n")); return 1; fail2: gdFree(cidx); fail1: return 0; } static gdImagePtr _gd2CreateFromFile (gdIOCtxPtr in, int 
*sx, int *sy, int *cs, int *vers, int *fmt, int *ncx, int *ncy, t_chunk_info ** cidx) { gdImagePtr im; if (_gd2GetHeader (in, sx, sy, cs, vers, fmt, ncx, ncy, cidx) != 1) { GD2_DBG (printf ("Bad GD2 header\n")); goto fail1; } if (gd2_truecolor (*fmt)) { im = gdImageCreateTrueColor (*sx, *sy); } else { im = gdImageCreate (*sx, *sy); } if (im == NULL) { GD2_DBG (printf ("Could not create gdImage\n")); goto fail2; }; if (!_gdGetColors (in, im, (*vers) == 2)) { GD2_DBG (printf ("Could not read color palette\n")); goto fail3; } GD2_DBG (printf ("Image palette completed: %d colours\n", im->colorsTotal)); return im; fail3: gdImageDestroy (im); fail2: gdFree(*cidx); fail1: return 0; } static int _gd2ReadChunk (int offset, char *compBuf, int compSize, char *chunkBuf, uLongf * chunkLen, gdIOCtx * in) { int zerr; if (gdTell (in) != offset) { GD2_DBG (printf ("Positioning in file to %d\n", offset)); gdSeek (in, offset); } else { GD2_DBG (printf ("Already Positioned in file to %d\n", offset)); }; /* Read and uncompress an entire chunk. */ GD2_DBG (printf ("Reading file\n")); if (gdGetBuf (compBuf, compSize, in) != compSize) { return FALSE; }; GD2_DBG (printf ("Got %d bytes. Uncompressing into buffer of %d bytes\n", compSize, *chunkLen)); zerr = uncompress ((unsigned char *) chunkBuf, chunkLen, (unsigned char *) compBuf, compSize); if (zerr != Z_OK) { GD2_DBG (printf ("Error %d from uncompress\n", zerr)); return FALSE; }; GD2_DBG (printf ("Got chunk\n")); return TRUE; } /* Function: gdImageCreateFromGd2 <gdImageCreateFromGd2> is called to load images from gd2 format files. Invoke <gdImageCreateFromGd2> with an already opened pointer to a file containing the desired image in the gd2 file format, which is specific to gd2 and intended for fast loading of parts of large images. (It is a compressed format, but generally not as good as maximum compression of the entire image would be.) 
<gdImageCreateFromGd2> returns a <gdImagePtr> to the new image, or NULL if unable to load the image (most often because the file is corrupt or does not contain a gd format image). <gdImageCreateFromGd2> does not close the file. You can inspect the sx and sy members of the image to determine its size. The image must eventually be destroyed using <gdImageDestroy>. Variants: <gdImageCreateFromGd2Ptr> creates an image from GD data (i.e. the contents of a GD2 file) already in memory. <gdImageCreateFromGd2Ctx> reads in an image using the functions in a <gdIOCtx> struct. Parameters: infile - The input FILE pointer Returns: A pointer to the new image or NULL if an error occurred. Example: > gdImagePtr im; > FILE *in; > in = fopen("mygd.gd2", "rb"); > im = gdImageCreateFromGd2(in); > fclose(in); > // ... Use the image ... > gdImageDestroy(im); */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2 (FILE * inFile) { gdIOCtx *in = gdNewFileCtx (inFile); gdImagePtr im; if (in == NULL) return NULL; im = gdImageCreateFromGd2Ctx (in); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2Ptr Parameters: size - size of GD2 data in bytes. data - GD2 data (i.e. contents of a GIF file). See <gdImageCreateFromGd2>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ptr (int size, void *data) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if(!in) return 0; im = gdImageCreateFromGd2Ctx (in); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2Ctx Reads in a GD2 image via a <gdIOCtx> struct. See <gdImageCreateFromGd2>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ctx (gdIOCtxPtr in) { int sx, sy; int i; int ncx, ncy, nc, cs, cx, cy; int x, y, ylo, yhi, xlo, xhi; int vers, fmt; t_chunk_info *chunkIdx = NULL; /* So we can gdFree it with impunity. */ unsigned char *chunkBuf = NULL; /* So we can gdFree it with impunity. 
*/ int chunkNum = 0; int chunkMax = 0; uLongf chunkLen; int chunkPos = 0; int compMax = 0; int bytesPerPixel; char *compBuf = NULL; /* So we can gdFree it with impunity. */ gdImagePtr im; /* Get the header */ im = _gd2CreateFromFile (in, &sx, &sy, &cs, &vers, &fmt, &ncx, &ncy, &chunkIdx); if (im == NULL) { /* No need to free chunkIdx as _gd2CreateFromFile does it for us. */ return 0; } bytesPerPixel = im->trueColor ? 4 : 1; nc = ncx * ncy; if (gd2_compressed (fmt)) { /* Find the maximum compressed chunk size. */ compMax = 0; for (i = 0; (i < nc); i++) { if (chunkIdx[i].size > compMax) { compMax = chunkIdx[i].size; }; }; compMax++; /* Allocate buffers */ chunkMax = cs * bytesPerPixel * cs; chunkBuf = gdCalloc (chunkMax, 1); if (!chunkBuf) { goto fail; } compBuf = gdCalloc (compMax, 1); if (!compBuf) { goto fail; } GD2_DBG (printf ("Largest compressed chunk is %d bytes\n", compMax)); }; /* if ( (ncx != sx / cs) || (ncy != sy / cs)) { */ /* goto fail2; */ /* }; */ /* Read the data... */ for (cy = 0; (cy < ncy); cy++) { for (cx = 0; (cx < ncx); cx++) { ylo = cy * cs; yhi = ylo + cs; if (yhi > im->sy) { yhi = im->sy; }; GD2_DBG (printf ("Processing Chunk %d (%d, %d), y from %d to %d\n", chunkNum, cx, cy, ylo, yhi)); if (gd2_compressed (fmt)) { chunkLen = chunkMax; if (!_gd2ReadChunk (chunkIdx[chunkNum].offset, compBuf, chunkIdx[chunkNum].size, (char *) chunkBuf, &chunkLen, in)) { GD2_DBG (printf ("Error reading comproessed chunk\n")); goto fail; }; chunkPos = 0; }; for (y = ylo; (y < yhi); y++) { xlo = cx * cs; xhi = xlo + cs; if (xhi > im->sx) { xhi = im->sx; }; /*GD2_DBG(printf("y=%d: ",y)); */ if (!gd2_compressed (fmt)) { for (x = xlo; x < xhi; x++) { if (im->trueColor) { if (!gdGetInt (&im->tpixels[y][x], in)) { /*printf("EOF while reading\n"); */ /*gdImageDestroy(im); */ /*return 0; */ im->tpixels[y][x] = 0; } } else { int ch; if (!gdGetByte (&ch, in)) { /*printf("EOF while reading\n"); */ /*gdImageDestroy(im); */ /*return 0; */ ch = 0; } im->pixels[y][x] = ch; } } 
} else { for (x = xlo; x < xhi; x++) { if (im->trueColor) { /* 2.0.1: work around a gcc bug by being verbose. TBB */ int a = chunkBuf[chunkPos++] << 24; int r = chunkBuf[chunkPos++] << 16; int g = chunkBuf[chunkPos++] << 8; int b = chunkBuf[chunkPos++]; /* 2.0.11: tpixels */ im->tpixels[y][x] = a + r + g + b; } else { im->pixels[y][x] = chunkBuf[chunkPos++]; } }; }; /*GD2_DBG(printf("\n")); */ }; chunkNum++; }; }; GD2_DBG (printf ("Freeing memory\n")); gdFree (chunkBuf); gdFree (compBuf); gdFree (chunkIdx); GD2_DBG (printf ("Done\n")); return im; fail: gdImageDestroy (im); if (chunkBuf) { gdFree (chunkBuf); } if (compBuf) { gdFree (compBuf); } if (chunkIdx) { gdFree (chunkIdx); } return 0; } /* Function: gdImageCreateFromGd2Part <gdImageCreateFromGd2Part> is called to load parts of images from gd2 format files. Invoked in the same way as <gdImageCreateFromGd2>, but with extra parameters indicating the source (x, y) and width/height of the desired image. <gdImageCreateFromGd2Part> returns a <gdImagePtr> to the new image, or NULL if unable to load the image. The image must eventually be destroyed using <gdImageDestroy>. Variants: <gdImageCreateFromGd2PartPtr> creates an image from GD2 data (i.e. the contents of a GD2 file) already in memory. <gdImageCreateFromGd2Ctx> reads in an image using the functions in a <gdIOCtx> struct. Parameters: infile - The input FILE pointer srcx, srcy - The source X and Y coordinates w, h - The resulting image's width and height Returns: A pointer to the new image or NULL if an error occurred. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Part (FILE * inFile, int srcx, int srcy, int w, int h) { gdImagePtr im; gdIOCtx *in = gdNewFileCtx (inFile); if (in == NULL) return NULL; im = gdImageCreateFromGd2PartCtx (in, srcx, srcy, w, h); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2PartPtr Parameters: size - size of GD data in bytes. data - GD data (i.e. contents of a GIF file). 
srcx, srcy - The source X and Y coordinates w, h - The resulting image's width and height Reads in part of a GD2 image file stored from memory. See <gdImageCreateFromGd2Part>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartPtr (int size, void *data, int srcx, int srcy, int w, int h) { gdImagePtr im; gdIOCtx *in = gdNewDynamicCtxEx (size, data, 0); if(!in) return 0; im = gdImageCreateFromGd2PartCtx (in, srcx, srcy, w, h); in->gd_free (in); return im; } /* Function: gdImageCreateFromGd2PartCtx Parameters: in - The data source. srcx, srcy - The source X and Y coordinates w, h - The resulting image's width and height Reads in part of a GD2 data image file via a <gdIOCtx> struct. See <gdImageCreateFromGd2Part>. */ BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartCtx (gdIOCtx * in, int srcx, int srcy, int w, int h) { int scx, scy, ecx, ecy, fsx, fsy; int nc, ncx, ncy, cs, cx, cy; int x, y, ylo, yhi, xlo, xhi; int dstart, dpos; int i; /* 2.0.12: unsigned is correct; fixes problems with color munging. Thanks to Steven Brown. */ unsigned int ch; int vers, fmt; t_chunk_info *chunkIdx = NULL; unsigned char *chunkBuf = NULL; int chunkNum; int chunkMax = 0; uLongf chunkLen; int chunkPos = 0; int compMax; char *compBuf = NULL; gdImagePtr im; /* */ /* The next few lines are basically copied from gd2CreateFromFile */ /* - we change the file size, so don't want to use the code directly. */ /* but we do need to know the file size. */ /* */ if (_gd2GetHeader (in, &fsx, &fsy, &cs, &vers, &fmt, &ncx, &ncy, &chunkIdx) != 1) { goto fail1; } GD2_DBG (printf ("File size is %dx%d\n", fsx, fsy)); /* This is the difference - make a file based on size of chunks. 
*/ if (gd2_truecolor (fmt)) { im = gdImageCreateTrueColor (w, h); } else { im = gdImageCreate (w, h); } if (im == NULL) { goto fail1; }; if (!_gdGetColors (in, im, vers == 2)) { goto fail2; } GD2_DBG (printf ("Image palette completed: %d colours\n", im->colorsTotal)); /* Process the header info */ nc = ncx * ncy; if (gd2_compressed (fmt)) { /* Find the maximum compressed chunk size. */ compMax = 0; for (i = 0; (i < nc); i++) { if (chunkIdx[i].size > compMax) { compMax = chunkIdx[i].size; }; }; compMax++; if (im->trueColor) { chunkMax = cs * cs * 4; } else { chunkMax = cs * cs; } chunkBuf = gdCalloc (chunkMax, 1); if (!chunkBuf) { goto fail2; } compBuf = gdCalloc (compMax, 1); if (!compBuf) { goto fail2; } }; /* Don't bother with this... */ /* if ( (ncx != sx / cs) || (ncy != sy / cs)) { */ /* goto fail2; */ /* }; */ /* Work out start/end chunks */ scx = srcx / cs; scy = srcy / cs; if (scx < 0) { scx = 0; }; if (scy < 0) { scy = 0; }; ecx = (srcx + w) / cs; ecy = (srcy + h) / cs; if (ecx >= ncx) { ecx = ncx - 1; }; if (ecy >= ncy) { ecy = ncy - 1; }; /* Remember file position of image data. */ dstart = gdTell (in); GD2_DBG (printf ("Data starts at %d\n", dstart)); /* Loop through the chunks. */ for (cy = scy; (cy <= ecy); cy++) { ylo = cy * cs; yhi = ylo + cs; if (yhi > fsy) { yhi = fsy; }; for (cx = scx; (cx <= ecx); cx++) { xlo = cx * cs; xhi = xlo + cs; if (xhi > fsx) { xhi = fsx; }; GD2_DBG (printf ("Processing Chunk (%d, %d), from %d to %d\n", cx, cy, ylo, yhi)); if (!gd2_compressed (fmt)) { GD2_DBG (printf ("Using raw format data\n")); if (im->trueColor) { dpos = (cy * (cs * fsx) * 4 + cx * cs * (yhi - ylo) * 4) + dstart; } else { dpos = cy * (cs * fsx) + cx * cs * (yhi - ylo) + dstart; } /* gd 2.0.11: gdSeek returns TRUE on success, not 0. Longstanding bug. 
01/16/03 */ if (!gdSeek (in, dpos)) { gd_error("Seek error\n"); goto fail2; }; GD2_DBG (printf ("Reading (%d, %d) from position %d\n", cx, cy, dpos - dstart)); } else { chunkNum = cx + cy * ncx; chunkLen = chunkMax; if (!_gd2ReadChunk (chunkIdx[chunkNum].offset, compBuf, chunkIdx[chunkNum].size, (char *) chunkBuf, &chunkLen, in)) { printf ("Error reading comproessed chunk\n"); goto fail2; }; chunkPos = 0; GD2_DBG (printf ("Reading (%d, %d) from chunk %d\n", cx, cy, chunkNum)); }; GD2_DBG (printf (" into (%d, %d) - (%d, %d)\n", xlo, ylo, xhi, yhi)); for (y = ylo; (y < yhi); y++) { for (x = xlo; x < xhi; x++) { if (!gd2_compressed (fmt)) { if (im->trueColor) { if (!gdGetInt ((int *) &ch, in)) { ch = 0; /*printf("EOF while reading file\n"); */ /*goto fail2; */ } } else { ch = gdGetC (in); if ((int) ch == EOF) { ch = 0; /*printf("EOF while reading file\n"); */ /*goto fail2; */ } } } else { if (im->trueColor) { ch = chunkBuf[chunkPos++]; ch = (ch << 8) + chunkBuf[chunkPos++]; ch = (ch << 8) + chunkBuf[chunkPos++]; ch = (ch << 8) + chunkBuf[chunkPos++]; } else { ch = chunkBuf[chunkPos++]; } }; /* Only use a point that is in the image. */ if ((x >= srcx) && (x < (srcx + w)) && (x < fsx) && (x >= 0) && (y >= srcy) && (y < (srcy + h)) && (y < fsy) && (y >= 0)) { /* 2.0.11: tpixels */ if (im->trueColor) { im->tpixels[y - srcy][x - srcx] = ch; } else { im->pixels[y - srcy][x - srcx] = ch; } } }; }; }; }; gdFree (chunkBuf); gdFree (compBuf); gdFree (chunkIdx); return im; fail2: gdImageDestroy (im); fail1: if (chunkBuf) { gdFree (chunkBuf); } if (compBuf) { gdFree (compBuf); } if (chunkIdx) { gdFree (chunkIdx); } return 0; } static void _gd2PutHeader (gdImagePtr im, gdIOCtx * out, int cs, int fmt, int cx, int cy) { int i; /* Send the gd2 id, to verify file format. */ for (i = 0; i < 4; i++) { gdPutC ((unsigned char) (GD2_ID[i]), out); }; /* */ /* We put the version info first, so future versions can easily change header info. 
*/ /* */ gdPutWord (GD2_VERS, out); gdPutWord (im->sx, out); gdPutWord (im->sy, out); gdPutWord (cs, out); gdPutWord (fmt, out); gdPutWord (cx, out); gdPutWord (cy, out); } static void _gdImageGd2 (gdImagePtr im, gdIOCtx * out, int cs, int fmt) { int ncx, ncy, cx, cy; int x, y, ylo, yhi, xlo, xhi; int chunkLen; int chunkNum = 0; char *chunkData = NULL; /* So we can gdFree it with impunity. */ char *compData = NULL; /* So we can gdFree it with impunity. */ uLongf compLen; int idxPos = 0; int idxSize; t_chunk_info *chunkIdx = NULL; int posSave; int bytesPerPixel = im->trueColor ? 4 : 1; int compMax = 0; /*printf("Trying to write GD2 file\n"); */ /* */ /* Force fmt to a valid value since we don't return anything. */ /* */ if ((fmt != GD2_FMT_RAW) && (fmt != GD2_FMT_COMPRESSED)) { fmt = GD2_FMT_COMPRESSED; }; if (im->trueColor) { fmt += 2; } /* */ /* Make sure chunk size is valid. These are arbitrary values; 64 because it seems */ /* a little silly to expect performance improvements on a 64x64 bit scale, and */ /* 4096 because we buffer one chunk, and a 16MB buffer seems a little large - it may be */ /* OK for one user, but for another to read it, they require the buffer. */ /* */ if (cs == 0) { cs = GD2_CHUNKSIZE; } else if (cs < GD2_CHUNKSIZE_MIN) { cs = GD2_CHUNKSIZE_MIN; } else if (cs > GD2_CHUNKSIZE_MAX) { cs = GD2_CHUNKSIZE_MAX; }; /* Work out number of chunks. */ ncx = (im->sx + cs - 1) / cs; ncy = (im->sy + cs - 1) / cs; /* Write the standard header. */ _gd2PutHeader (im, out, cs, fmt, ncx, ncy); if (gd2_compressed (fmt)) { /* */ /* Work out size of buffer for compressed data, If CHUNKSIZE is large, */ /* then these will be large! */ /* */ /* The zlib notes say output buffer size should be (input size) * 1.01 * 12 */ /* - we'll use 1.02 to be paranoid. */ /* */ compMax = cs * bytesPerPixel * cs * 1.02 + 12; /* */ /* Allocate the buffers. 
*/
	/* */
	/* Per-chunk staging buffer: one chunk is at most cs*cs pixels of
	 * bytesPerPixel bytes each (truecolor = 4 bytes A,R,G,B; palette = 1). */
	chunkData = gdCalloc (cs * bytesPerPixel * cs, 1);
	if (!chunkData) {
		goto fail;
	}
	/* compMax is the zlib worst-case compressed size for one chunk
	 * (computed before this excerpt — TODO confirm against full source). */
	compData = gdCalloc (compMax, 1);
	if (!compData) {
		goto fail;
	}

	/* */
	/* Save the file position of chunk index, and allocate enough space for */
	/* each chunk_info block . */
	/* */
	idxPos = gdTell (out);
	idxSize = ncx * ncy * sizeof (t_chunk_info);
	GD2_DBG (printf ("Index size is %d\n", idxSize));
	/* Reserve room for the index; the real entries are written at the end
	 * once each chunk's offset/size is known, then we seek back. */
	gdSeek (out, idxPos + idxSize);
	/* NOTE(review): idxSize already includes sizeof(t_chunk_info), so this
	 * multiplies by sizeof(t_chunk_info) twice and over-allocates by that
	 * factor.  Harmless (too big, not too small), but worth fixing. */
	chunkIdx = gdCalloc (idxSize * sizeof (t_chunk_info), 1);
	if (!chunkIdx) {
		goto fail;
	}
};

/* Palette/transparency data is emitted for both formats. */
_gdPutColors (im, out);

GD2_DBG (printf ("Size: %dx%d\n", im->sx, im->sy));
GD2_DBG (printf ("Chunks: %dx%d\n", ncx, ncy));

/* Walk the image in chunk (cy, cx) order; ragged right/bottom chunks are
 * clamped to the image bounds below. */
for (cy = 0; (cy < ncy); cy++) {
	for (cx = 0; (cx < ncx); cx++) {

		ylo = cy * cs;
		yhi = ylo + cs;
		if (yhi > im->sy) {
			yhi = im->sy;
		};

		GD2_DBG (printf
		         ("Processing Chunk (%dx%d), y from %d to %d\n", cx, cy, ylo,
		          yhi));
		chunkLen = 0;
		for (y = ylo; (y < yhi); y++) {

			/*GD2_DBG(printf("y=%d: ",y)); */

			xlo = cx * cs;
			xhi = xlo + cs;
			if (xhi > im->sx) {
				xhi = im->sx;
			};

			if (gd2_compressed (fmt)) {
				/* Compressed: stage raw pixel bytes into chunkData,
				 * compressed as a unit after the chunk is complete. */
				for (x = xlo; x < xhi; x++) {
					/* 2.0.11: use truecolor pixel array. TBB */
					/*GD2_DBG(printf("%d...",x)); */
					if (im->trueColor) {
						int p = im->tpixels[y][x];
						chunkData[chunkLen++] = gdTrueColorGetAlpha (p);
						chunkData[chunkLen++] = gdTrueColorGetRed (p);
						chunkData[chunkLen++] = gdTrueColorGetGreen (p);
						chunkData[chunkLen++] = gdTrueColorGetBlue (p);
					} else {
						int p = im->pixels[y][x];
						chunkData[chunkLen++] = p;
					}
				};
			} else {
				/* Raw: write pixels straight to the output context. */
				for (x = xlo; x < xhi; x++) {
					/*GD2_DBG(printf("%d, ",x)); */
					if (im->trueColor) {
						gdPutInt (im->tpixels[y][x], out);
					} else {
						gdPutC ((unsigned char) im->pixels[y][x], out);
					}
				};
			};
			/*GD2_DBG(printf("y=%d done.\n",y)); */
		};
		if (gd2_compressed (fmt)) {
			compLen = compMax;
			/* NOTE(review): on compress() failure we only print and fall
			 * through — no index entry is recorded and no error is
			 * propagated to the caller; the resulting file silently
			 * misses this chunk.  Also printf, not gd_error. */
			if (compress ((unsigned char *) &compData[0], &compLen,
			              (unsigned char *) &chunkData[0],
			              chunkLen) != Z_OK) {
				printf ("Error from compressing\n");
			} else {
				/* Record where this chunk landed so the index (written
				 * later at idxPos) can point readers at it. */
				chunkIdx[chunkNum].offset = gdTell (out);
				chunkIdx[chunkNum++].size = compLen;
				GD2_DBG (printf
				         ("Chunk %d size %d offset %d\n", chunkNum,
				          chunkIdx[chunkNum - 1].size,
				          chunkIdx[chunkNum - 1].offset));

				if (gdPutBuf (compData, compLen, out) <= 0) {
					/* Write error on the I/O context; not propagated. */
					gd_error("gd write error\n");
				};
			};
		};
	};
};

if (gd2_compressed (fmt)) {
	/* Save the position, write the index, restore position (paranoia). */
	GD2_DBG (printf ("Seeking %d to write index\n", idxPos));
	posSave = gdTell (out);
	gdSeek (out, idxPos);
	GD2_DBG (printf ("Writing index\n"));
	for (x = 0; x < chunkNum; x++) {
		GD2_DBG (printf ("Chunk %d size %d offset %d\n", x,
		                 chunkIdx[x].size, chunkIdx[x].offset));
		gdPutInt (chunkIdx[x].offset, out);
		gdPutInt (chunkIdx[x].size, out);
	};
	/* We don't use fwrite for *endian reasons. */
	/*fwrite(chunkIdx, sizeof(int)*2, chunkNum, out); */
	gdSeek (out, posSave);
};

/*printf("Memory block size is %d\n",gdTell(out)); */
fail:
	/* Unified cleanup path; the three buffers are NULL-initialized before
	 * the first goto (earlier in this function — TODO confirm), so the
	 * guards below are safe on early failure. */
	GD2_DBG (printf ("Freeing memory\n"));
	if (chunkData) {
		gdFree (chunkData);
	}
	if (compData) {
		gdFree (compData);
	}
	if (chunkIdx) {
		gdFree (chunkIdx);
	}
	GD2_DBG (printf ("Done\n"));
}

/*
 * Function: gdImageGd2
 *
 * Write `im` to the stdio stream `outFile` in GD2 format with chunk size
 * `cs` and format `fmt` (raw or compressed).  Thin wrapper that adapts the
 * FILE* to a gdIOCtx and delegates to _gdImageGd2.
 */
BGD_DECLARE(void) gdImageGd2 (gdImagePtr im, FILE * outFile, int cs, int fmt)
{
	gdIOCtx *out = gdNewFileCtx (outFile);
	if (out == NULL) return;
	_gdImageGd2 (im, out, cs, fmt);
	out->gd_free (out);
}

/*
 * Function: gdImageGd2Ptr
 *
 * As gdImageGd2 but serializes into a heap buffer; returns the buffer
 * (caller frees with gdFree) and stores its length in *size.  Returns
 * NULL if the dynamic I/O context cannot be created.
 */
BGD_DECLARE(void *) gdImageGd2Ptr (gdImagePtr im, int cs, int fmt, int *size)
{
	void *rv;
	gdIOCtx *out = gdNewDynamicCtx (2048, NULL);
	if (out == NULL) return NULL;
	_gdImageGd2 (im, out, cs, fmt);
	rv = gdDPExtractData (out, size);
	out->gd_free (out);
	return rv;
}

#else /* no HAVE_LIBZ */

/* Without zlib the GD2 format is unsupported; every entry point below is a
 * stub that reports the condition and returns a failure value. */
static void _noLibzError (void)
{
	gd_error("GD2 support is not available - no libz\n");
}

BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2 (FILE * inFile)
{
	_noLibzError();
	return NULL;
}

BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ctx (gdIOCtxPtr in)
{
	_noLibzError();
	return NULL;
}

BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Part (FILE * inFile, int srcx, int srcy, int w,
                                                  int h)
{
	_noLibzError();
	return NULL;
}

BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2Ptr (int size, void *data)
{
	_noLibzError();
	return NULL;
}

BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartCtx (gdIOCtx * in, int srcx, int srcy,
                                                     int w, int h)
{
	_noLibzError();
	return NULL;
}

BGD_DECLARE(gdImagePtr) gdImageCreateFromGd2PartPtr (int size, void *data, int srcx,
                                                     int srcy, int w, int h)
{
	_noLibzError();
	return NULL;
}

BGD_DECLARE(void) gdImageGd2 (gdImagePtr im, FILE * outFile, int cs, int fmt)
{
	_noLibzError();
}

BGD_DECLARE(void *) gdImageGd2Ptr (gdImagePtr im, int cs, int fmt, int *size)
{
	_noLibzError();
	return NULL;
}

#endif /* HAVE_LIBZ */
./CrossVul/dataset_final_sorted/CWE-20/c/bad_4829_0
crossvul-cpp_data_bad_1563_1
/*
    Copyright (C) ABRT Team
    Copyright (C) RedHat inc.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <glib.h>
#include <sys/time.h>
#include "problem_api.h"

/*
 * Goes through all problems and for problems accessible by caller_uid
 * calls callback. If callback returns non-0, returns that value.
 *
 * path       - directory containing one dump directory per problem
 * caller_uid - uid whose access rights gate the iteration; pass (uid_t)-1
 *              to disable the check (note: uid_t is unsigned, so the
 *              `== -1` comparison below relies on the usual conversion)
 * callback   - invoked with each opened dump_dir; may be NULL
 * arg        - opaque pointer forwarded to callback
 */
int for_each_problem_in_dir(const char *path,
                        uid_t caller_uid,
                        int (*callback)(struct dump_dir *dd, void *arg),
                        void *arg)
{
    DIR *dp = opendir(path);
    if (!dp)
    {
        /* We don't want to yell if, say, $XDG_CACHE_DIR/abrt/spool doesn't exist */
        //perror_msg("Can't open directory '%s'", path);
        return 0;
    }

    int brk = 0;
    struct dirent *dent;
    while ((dent = readdir(dp)) != NULL)
    {
        if (dot_or_dotdot(dent->d_name))
            continue; /* skip "." and ".." */

        char *full_name = concat_path_file(path, dent->d_name);
        /* NOTE(review): TOCTOU window here — the access check is done on
         * the *path* and the directory is opened by path again below, so
         * the entry can be swapped (e.g. for a symlink) between the two
         * calls.  A race-free version needs to open first and verify
         * permissions on the opened directory/fd.  TODO confirm which
         * fd-based libreport API is available and use it. */
        if (caller_uid == -1 || dump_dir_accessible_by_uid(full_name, caller_uid))
        {
            /* Silently ignore *any* errors, not only EACCES.
             * We saw "lock file is locked by process PID" error
             * when we raced with wizard.
             */
            int sv_logmode = logmode;
            logmode = 0;
            struct dump_dir *dd = dd_opendir(full_name, DD_OPEN_READONLY | DD_FAIL_QUIETLY_EACCES | DD_DONT_WAIT_FOR_LOCK);
            logmode = sv_logmode;
            if (dd)
            {
                /* A NULL callback degenerates into a pure accessibility walk. */
                brk = callback ? callback(dd, arg) : 0;
                dd_close(dd);
            }
        }
        free(full_name);
        if (brk)
            break;
    }
    closedir(dp);

    return brk;
}

/* get_problem_dirs_for_uid and its helpers */

/* Callback for for_each_problem_in_dir: collect each visited dump dir's
 * path (duplicated) into the GList pointed to by arg.  Always returns 0
 * so iteration continues. */
static int add_dirname_to_GList(struct dump_dir *dd, void *arg)
{
    GList **list = arg;
    *list = g_list_prepend(*list, xstrdup(dd->dd_dirname));
    return 0;
}

/* Return a newly allocated GList of dump-dir paths under dump_location
 * that are accessible to uid.  Caller frees list and strings. */
GList *get_problem_dirs_for_uid(uid_t uid, const char *dump_location)
{
    GList *list = NULL;
    for_each_problem_in_dir(dump_location, uid, add_dirname_to_GList, &list);
    /*
     * Why reverse?
     * Because N*prepend+reverse is faster than N*append
     */
    return g_list_reverse(list);
}


/* get_problem_dirs_not_accessible_by_uid and its helpers */
struct add_dirname_to_GList_if_not_accessible_args
{
    uid_t uid;       /* uid whose lack of access selects a dir */
    GList *list;     /* accumulated inaccessible dir paths */
};

/* Callback: append dd's path to the list only when `uid` may NOT access
 * it.  Used with the uid check disabled in the outer iteration. */
static int add_dirname_to_GList_if_not_accessible(struct dump_dir *dd, void *args)
{
    struct add_dirname_to_GList_if_not_accessible_args *param = (struct add_dirname_to_GList_if_not_accessible_args *)args;

    /* Append if not accessible */
    if (!dump_dir_accessible_by_uid(dd->dd_dirname, param->uid))
        param->list = g_list_prepend(param->list, xstrdup(dd->dd_dirname));
    return 0;
}

/* Inverse of get_problem_dirs_for_uid: paths under dump_location that the
 * given uid cannot access.  Caller frees list and strings. */
GList *get_problem_dirs_not_accessible_by_uid(uid_t uid, const char *dump_location)
{
    struct add_dirname_to_GList_if_not_accessible_args args = {
        .uid = uid,
        .list = NULL,
    };

    for_each_problem_in_dir(dump_location, /*disable default uid check*/-1, add_dirname_to_GList_if_not_accessible, &args);

    return g_list_reverse(args.list);
}

/* get_problem_storages */

/* Return the list of directories that may hold problem dirs: the system
 * dump location from abrt.conf plus the per-user spool.  Caller frees. */
GList *get_problem_storages(void)
{
    GList *pths = NULL;
    load_abrt_conf();
    pths = g_list_append(pths, xstrdup(g_settings_dump_location));
    //not needed, we don't steal directories anymore
    pths = g_list_append(pths, concat_path_file(g_get_user_cache_dir(), "abrt/spool"));
    free_abrt_conf_data();

    return pths;
}

/* A dump dir is "complete" once the daemon has written its 'count' file. */
int problem_dump_dir_is_complete(struct dump_dir *dd)
{
    return dd_exist(dd, FILENAME_COUNT);
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_1563_1
crossvul-cpp_data_good_2891_3
/*
 * Copyright (C) 2010 IBM Corporation
 * Copyright (C) 2010 Politecnico di Torino, Italy
 *                    TORSEC group -- http://security.polito.it
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Roberto Sassu <roberto.sassu@polito.it>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * See Documentation/security/keys/trusted-encrypted.rst
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/parser.h>
#include <linux/string.h>
#include <linux/err.h>
#include <keys/user-type.h>
#include <keys/trusted-type.h>
#include <keys/encrypted-type.h>
#include <linux/key-type.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/scatterlist.h>
#include <linux/ctype.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>

#include "encrypted.h"
#include "ecryptfs_format.h"

static const char KEY_TRUSTED_PREFIX[] = "trusted:";
static const char KEY_USER_PREFIX[] = "user:";
static const char hash_alg[] = "sha256";
static const char hmac_alg[] = "hmac(sha256)";
static const char blkcipher_alg[] = "cbc(aes)";
static const char key_format_default[] = "default";
static const char key_format_ecryptfs[] = "ecryptfs";
static unsigned int ivsize;   /* IV size of blkcipher_alg, set at init */
static int blksize;           /* block size of blkcipher_alg, set at init */

#define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1)
#define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1)
#define KEY_ECRYPTFS_DESC_LEN 16
#define HASH_SIZE SHA256_DIGEST_SIZE
#define MAX_DATA_SIZE 4096
#define MIN_DATA_SIZE  20

static struct crypto_shash *hash_tfm;  /* shared sha256 tfm, allocated at init */

enum {
	Opt_err = -1, Opt_new, Opt_load, Opt_update
};

enum {
	Opt_error = -1, Opt_default, Opt_ecryptfs
};

static const match_table_t key_format_tokens = {
	{Opt_default, "default"},
	{Opt_ecryptfs, "ecryptfs"},
	{Opt_error, NULL}
};

static const match_table_t key_tokens = {
	{Opt_new, "new"},
	{Opt_load, "load"},
	{Opt_update, "update"},
	{Opt_err, NULL}
};

/* Probe the CBC-AES transform once to learn its IV and block sizes. */
static int aes_get_sizes(void)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	ivsize = crypto_skcipher_ivsize(tfm);
	blksize = crypto_skcipher_blocksize(tfm);
	crypto_free_skcipher(tfm);
	return 0;
}

/*
 * valid_ecryptfs_desc - verify the description of a new/loaded encrypted key
 *
 * The description of a encrypted key with format 'ecryptfs' must contain
 * exactly 16 hexadecimal characters.
 *
 */
static int valid_ecryptfs_desc(const char *ecryptfs_desc)
{
	int i;

	if (strlen(ecryptfs_desc) != KEY_ECRYPTFS_DESC_LEN) {
		pr_err("encrypted_key: key description must be %d hexadecimal "
		       "characters long\n", KEY_ECRYPTFS_DESC_LEN);
		return -EINVAL;
	}

	for (i = 0; i < KEY_ECRYPTFS_DESC_LEN; i++) {
		if (!isxdigit(ecryptfs_desc[i])) {
			pr_err("encrypted_key: key description must contain "
			       "only hexadecimal characters\n");
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key
 *
 * key-type:= "trusted:" | "user:"
 * desc:= master-key description
 *
 * Verify that 'key-type' is valid and that 'desc' exists. On key update,
 * only the master key description is permitted to change, not the key-type.
 * The key-type remains constant.
 *
 * On success returns 0, otherwise -EINVAL.
 */
static int valid_master_desc(const char *new_desc, const char *orig_desc)
{
	int prefix_len;

	if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
		prefix_len = KEY_TRUSTED_PREFIX_LEN;
	else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
		prefix_len = KEY_USER_PREFIX_LEN;
	else
		return -EINVAL;

	if (!new_desc[prefix_len])
		return -EINVAL;

	if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
		return -EINVAL;

	return 0;
}

/*
 * datablob_parse - parse the keyctl data
 *
 * datablob format:
 * new [<format>] <master-key name> <decrypted data length>
 * load [<format>] <master-key name> <decrypted data length>
 *     <encrypted iv + data>
 * update <new-master-key name>
 *
 * Tokenizes a copy of the keyctl data, returning a pointer to each token,
 * which is null terminated.
 *
 * On success returns 0, otherwise -EINVAL.
 */
static int datablob_parse(char *datablob, const char **format,
			  char **master_desc, char **decrypted_datalen,
			  char **hex_encoded_iv)
{
	substring_t args[MAX_OPT_ARGS];
	int ret = -EINVAL;
	int key_cmd;
	int key_format;
	char *p, *keyword;

	keyword = strsep(&datablob, " \t");
	if (!keyword) {
		pr_info("encrypted_key: insufficient parameters specified\n");
		return ret;
	}
	key_cmd = match_token(keyword, key_tokens, args);

	/* Get optional format: default | ecryptfs */
	p = strsep(&datablob, " \t");
	if (!p) {
		pr_err("encrypted_key: insufficient parameters specified\n");
		return ret;
	}

	key_format = match_token(p, key_format_tokens, args);
	switch (key_format) {
	case Opt_ecryptfs:
	case Opt_default:
		*format = p;
		*master_desc = strsep(&datablob, " \t");
		break;
	case Opt_error:
		/* format omitted: the token already read is the master desc */
		*master_desc = p;
		break;
	}

	if (!*master_desc) {
		pr_info("encrypted_key: master key parameter is missing\n");
		goto out;
	}

	if (valid_master_desc(*master_desc, NULL) < 0) {
		pr_info("encrypted_key: master key parameter \'%s\' "
			"is invalid\n", *master_desc);
		goto out;
	}

	if (decrypted_datalen) {
		*decrypted_datalen = strsep(&datablob, " \t");
		if (!*decrypted_datalen) {
			pr_info("encrypted_key: keylen parameter is missing\n");
			goto out;
		}
	}

	switch (key_cmd) {
	case Opt_new:
		if (!decrypted_datalen) {
			pr_info("encrypted_key: keyword \'%s\' not allowed "
				"when called from .update method\n", keyword);
			break;
		}
		ret = 0;
		break;
	case Opt_load:
		if (!decrypted_datalen) {
			pr_info("encrypted_key: keyword \'%s\' not allowed "
				"when called from .update method\n", keyword);
			break;
		}
		*hex_encoded_iv = strsep(&datablob, " \t");
		if (!*hex_encoded_iv) {
			pr_info("encrypted_key: hex blob is missing\n");
			break;
		}
		ret = 0;
		break;
	case Opt_update:
		if (decrypted_datalen) {
			pr_info("encrypted_key: keyword \'%s\' not allowed "
				"when called from .instantiate method\n",
				keyword);
			break;
		}
		ret = 0;
		break;
	case Opt_err:
		pr_info("encrypted_key: keyword \'%s\' not recognized\n",
			keyword);
		break;
	}
out:
	return ret;
}

/*
 * datablob_format - format as an ascii string, before copying to userspace
 */
static char *datablob_format(struct encrypted_key_payload *epayload,
			     size_t asciiblob_len)
{
	char *ascii_buf, *bufp;
	u8 *iv = epayload->iv;
	int len;
	int i;

	ascii_buf = kmalloc(asciiblob_len + 1, GFP_KERNEL);
	if (!ascii_buf)
		goto out;

	ascii_buf[asciiblob_len] = '\0';

	/* copy datablob master_desc and datalen strings */
	len = sprintf(ascii_buf, "%s %s %s ", epayload->format,
		      epayload->master_desc, epayload->datalen);

	/* convert the hex encoded iv, encrypted-data and HMAC to ascii */
	/* (iv, encrypted_data and the HMAC are laid out contiguously after
	 * one another by __ekey_init, so hexing past ivsize covers them all) */
	bufp = &ascii_buf[len];
	for (i = 0; i < (asciiblob_len - len) / 2; i++)
		bufp = hex_byte_pack(bufp, iv[i]);
out:
	return ascii_buf;
}

/*
 * request_user_key - request the user key
 *
 * Use a user provided key to encrypt/decrypt an encrypted-key.
 */
static struct key *request_user_key(const char *master_desc, const u8 **master_key,
				    size_t *master_keylen)
{
	const struct user_key_payload *upayload;
	struct key *ukey;

	ukey = request_key(&key_type_user, master_desc, NULL);
	if (IS_ERR(ukey))
		goto error;

	down_read(&ukey->sem);
	upayload = user_key_payload_locked(ukey);
	if (!upayload) {
		/* key was revoked before we acquired its semaphore */
		up_read(&ukey->sem);
		key_put(ukey);
		ukey = ERR_PTR(-EKEYREVOKED);
		goto error;
	}
	*master_key = upayload->data;
	*master_keylen = upayload->datalen;
error:
	return ukey;
}

/* One-shot digest of buf into digest using the given shash tfm. */
static int calc_hash(struct crypto_shash *tfm, u8 *digest,
		     const u8 *buf, unsigned int buflen)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_digest(desc, buf, buflen, digest);
	shash_desc_zero(desc);
	return err;
}

/* HMAC-SHA256 of buf under key; allocates and frees its own tfm. */
static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
		     const u8 *buf, unsigned int buflen)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("encrypted_key: can't alloc %s transform: %ld\n",
		       hmac_alg, PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err)
		err = calc_hash(tfm, digest, buf, buflen);
	crypto_free_shash(tfm);
	return err;
}

enum derived_key_type { ENC_KEY, AUTH_KEY };

/* Derive authentication/encryption key from trusted key */
/* derived = sha256("ENC_KEY\0" | master) or sha256("AUTH_KEY\0" | master) */
static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
			   const u8 *master_key, size_t master_keylen)
{
	u8 *derived_buf;
	unsigned int derived_buf_len;
	int ret;

	derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen;
	if (derived_buf_len < HASH_SIZE)
		derived_buf_len = HASH_SIZE;

	derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
	if (!derived_buf)
		return -ENOMEM;

	if (key_type)
		strcpy(derived_buf, "AUTH_KEY");
	else
		strcpy(derived_buf, "ENC_KEY");

	memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
	       master_keylen);
	ret = calc_hash(hash_tfm, derived_key, derived_buf, derived_buf_len);
	/* kzfree: derived_buf contains master key material */
	kzfree(derived_buf);
	return ret;
}

/* Allocate a keyed CBC-AES skcipher request; caller frees req and its tfm. */
static struct skcipher_request *init_skcipher_req(const u8 *key,
						  unsigned int key_len)
{
	struct skcipher_request *req;
	struct crypto_skcipher *tfm;
	int ret;

	tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("encrypted_key: failed to load %s transform (%ld)\n",
		       blkcipher_alg, PTR_ERR(tfm));
		return ERR_CAST(tfm);
	}

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret < 0) {
		pr_err("encrypted_key: failed to setkey (%d)\n", ret);
		crypto_free_skcipher(tfm);
		return ERR_PTR(ret);
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("encrypted_key: failed to allocate request for %s\n",
		       blkcipher_alg);
		crypto_free_skcipher(tfm);
		return ERR_PTR(-ENOMEM);
	}

	skcipher_request_set_callback(req, 0, NULL, NULL);
	return req;
}

/* Resolve the master key named by epayload->master_desc ("trusted:" or
 * "user:" prefixed); on success returns the key with its sem read-held. */
static struct key *request_master_key(struct encrypted_key_payload *epayload,
				      const u8 **master_key, size_t *master_keylen)
{
	struct key *mkey = ERR_PTR(-EINVAL);

	if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
		     KEY_TRUSTED_PREFIX_LEN)) {
		mkey = request_trusted_key(epayload->master_desc +
					   KEY_TRUSTED_PREFIX_LEN,
					   master_key, master_keylen);
	} else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX,
			    KEY_USER_PREFIX_LEN)) {
		mkey = request_user_key(epayload->master_desc +
					KEY_USER_PREFIX_LEN,
					master_key, master_keylen);
	} else
		goto out;

	if (IS_ERR(mkey)) {
		int ret = PTR_ERR(mkey);

		if (ret == -ENOTSUPP)
			pr_info("encrypted_key: key %s not supported",
				epayload->master_desc);
		else
			pr_info("encrypted_key: key %s not found",
				epayload->master_desc);
		goto out;
	}

	dump_master_key(*master_key, *master_keylen);
out:
	return mkey;
}

/* Before returning data to userspace, encrypt decrypted data. */
static int derived_key_encrypt(struct encrypted_key_payload *epayload,
			       const u8 *derived_key,
			       unsigned int derived_keylen)
{
	struct scatterlist sg_in[2];
	struct scatterlist sg_out[1];
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	unsigned int encrypted_datalen;
	u8 iv[AES_BLOCK_SIZE];
	int ret;

	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);

	req = init_skcipher_req(derived_key, derived_keylen);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;
	dump_decrypted_data(epayload);

	sg_init_table(sg_in, 2);
	sg_set_buf(&sg_in[0], epayload->decrypted_data,
		   epayload->decrypted_datalen);
	/* ZERO_PAGE supplies the CBC padding up to the block boundary */
	sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);

	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);

	memcpy(iv, epayload->iv, sizeof(iv));
	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
	ret = crypto_skcipher_encrypt(req);
	tfm = crypto_skcipher_reqtfm(req);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	if (ret < 0)
		pr_err("encrypted_key: failed to encrypt (%d)\n", ret);
	else
		dump_encrypted_data(epayload, encrypted_datalen);
out:
	return ret;
}

/* Compute the AUTH_KEY HMAC over the datablob and append it after it. */
static int datablob_hmac_append(struct encrypted_key_payload *epayload,
				const u8 *master_key, size_t master_keylen)
{
	u8 derived_key[HASH_SIZE];
	u8 *digest;
	int ret;

	ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
	if (ret < 0)
		goto out;

	digest = epayload->format + epayload->datablob_len;
	ret = calc_hmac(digest, derived_key, sizeof derived_key,
			epayload->format, epayload->datablob_len);
	if (!ret)
		dump_hmac(NULL, digest, HASH_SIZE);
out:
	memzero_explicit(derived_key, sizeof(derived_key));
	return ret;
}

/* verify HMAC before decrypting encrypted key */
static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
				const u8 *format, const u8 *master_key,
				size_t master_keylen)
{
	u8 derived_key[HASH_SIZE];
	u8 digest[HASH_SIZE];
	int ret;
	char *p;
	unsigned short len;

	ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
	if (ret < 0)
		goto out;

	len = epayload->datablob_len;
	if (!format) {
		/* legacy blob without a format field: HMAC starts at the
		 * master_desc and is correspondingly shorter */
		p = epayload->master_desc;
		len -= strlen(epayload->format) + 1;
	} else
		p = epayload->format;

	ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
	if (ret < 0)
		goto out;
	/* crypto_memneq: constant-time compare of the two MACs */
	ret = crypto_memneq(digest, epayload->format + epayload->datablob_len,
			    sizeof(digest));
	if (ret) {
		ret = -EINVAL;
		dump_hmac("datablob",
			  epayload->format + epayload->datablob_len,
			  HASH_SIZE);
		dump_hmac("calc", digest, HASH_SIZE);
	}
out:
	memzero_explicit(derived_key, sizeof(derived_key));
	return ret;
}

/* CBC-decrypt epayload->encrypted_data into epayload->decrypted_data;
 * trailing pad bytes are discarded into a throwaway buffer. */
static int derived_key_decrypt(struct encrypted_key_payload *epayload,
			       const u8 *derived_key,
			       unsigned int derived_keylen)
{
	struct scatterlist sg_in[1];
	struct scatterlist sg_out[2];
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	unsigned int encrypted_datalen;
	u8 iv[AES_BLOCK_SIZE];
	u8 *pad;
	int ret;

	/* Throwaway buffer to hold the unused zero padding at the end */
	pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	if (!pad)
		return -ENOMEM;

	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
	req = init_skcipher_req(derived_key, derived_keylen);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;
	dump_encrypted_data(epayload, encrypted_datalen);

	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 2);
	sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
	sg_set_buf(&sg_out[0], epayload->decrypted_data,
		   epayload->decrypted_datalen);
	sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);

	memcpy(iv, epayload->iv, sizeof(iv));
	skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
	ret = crypto_skcipher_decrypt(req);
	tfm = crypto_skcipher_reqtfm(req);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	if (ret < 0)
		goto out;
	dump_decrypted_data(epayload);
out:
	kfree(pad);
	return ret;
}

/* Allocate memory for decrypted key and datablob. */
static struct encrypted_key_payload *encrypted_key_alloc(struct key *key,
							 const char *format,
							 const char *master_desc,
							 const char *datalen)
{
	struct encrypted_key_payload *epayload = NULL;
	unsigned short datablob_len;
	unsigned short decrypted_datalen;
	unsigned short payload_datalen;
	unsigned int encrypted_datalen;
	unsigned int format_len;
	long dlen;
	int ret;

	ret = kstrtol(datalen, 10, &dlen);
	if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE)
		return ERR_PTR(-EINVAL);

	format_len = (!format) ? strlen(key_format_default) : strlen(format);
	decrypted_datalen = dlen;
	payload_datalen = decrypted_datalen;
	if (format && !strcmp(format, key_format_ecryptfs)) {
		/* ecryptfs keys have a fixed length and a larger payload
		 * (the full auth_tok structure, not just the key bytes) */
		if (dlen != ECRYPTFS_MAX_KEY_BYTES) {
			pr_err("encrypted_key: keylen for the ecryptfs format "
			       "must be equal to %d bytes\n",
			       ECRYPTFS_MAX_KEY_BYTES);
			return ERR_PTR(-EINVAL);
		}
		decrypted_datalen = ECRYPTFS_MAX_KEY_BYTES;
		payload_datalen = sizeof(struct ecryptfs_auth_tok);
	}

	encrypted_datalen = roundup(decrypted_datalen, blksize);

	datablob_len = format_len + 1 + strlen(master_desc) + 1
	    + strlen(datalen) + 1 + ivsize + 1 + encrypted_datalen;

	ret = key_payload_reserve(key, payload_datalen + datablob_len
				  + HASH_SIZE + 1);
	if (ret < 0)
		return ERR_PTR(ret);

	epayload = kzalloc(sizeof(*epayload) + payload_datalen +
			   datablob_len + HASH_SIZE + 1, GFP_KERNEL);
	if (!epayload)
		return ERR_PTR(-ENOMEM);

	epayload->payload_datalen = payload_datalen;
	epayload->decrypted_datalen = decrypted_datalen;
	epayload->datablob_len = datablob_len;
	return epayload;
}

/* Parse the ascii hex blob (iv | encrypted data | hmac), verify the HMAC
 * under the master key, and decrypt into the payload. */
static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
				 const char *format, const char *hex_encoded_iv)
{
	struct key *mkey;
	u8 derived_key[HASH_SIZE];
	const u8 *master_key;
	u8 *hmac;
	const char *hex_encoded_data;
	unsigned int encrypted_datalen;
	size_t master_keylen;
	size_t asciilen;
	int ret;

	encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
	/* strict length check on the caller-supplied hex blob */
	asciilen = (ivsize + 1 + encrypted_datalen + HASH_SIZE) * 2;
	if (strlen(hex_encoded_iv) != asciilen)
		return -EINVAL;

	hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2;
	ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize);
	if (ret < 0)
		return -EINVAL;
	ret = hex2bin(epayload->encrypted_data, hex_encoded_data,
		      encrypted_datalen);
	if (ret < 0)
		return -EINVAL;

	hmac = epayload->format + epayload->datablob_len;
	ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2),
		      HASH_SIZE);
	if (ret < 0)
		return -EINVAL;

	mkey = request_master_key(epayload, &master_key, &master_keylen);
	if (IS_ERR(mkey))
		return PTR_ERR(mkey);

	ret = datablob_hmac_verify(epayload, format, master_key, master_keylen);
	if (ret < 0) {
		pr_err("encrypted_key: bad hmac (%d)\n", ret);
		goto out;
	}

	ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
	if (ret < 0)
		goto out;

	ret = derived_key_decrypt(epayload, derived_key, sizeof derived_key);
	if (ret < 0)
		pr_err("encrypted_key: failed to decrypt key (%d)\n", ret);
out:
	up_read(&mkey->sem);
	key_put(mkey);
	memzero_explicit(derived_key, sizeof(derived_key));
	return ret;
}

/* Lay out the format/master_desc/datalen/iv/encrypted_data fields in the
 * single contiguous allocation made by encrypted_key_alloc. */
static void __ekey_init(struct encrypted_key_payload *epayload,
			const char *format, const char *master_desc,
			const char *datalen)
{
	unsigned int format_len;

	format_len = (!format) ? strlen(key_format_default) : strlen(format);
	epayload->format = epayload->payload_data + epayload->payload_datalen;
	epayload->master_desc = epayload->format + format_len + 1;
	epayload->datalen = epayload->master_desc + strlen(master_desc) + 1;
	epayload->iv = epayload->datalen + strlen(datalen) + 1;
	epayload->encrypted_data = epayload->iv + ivsize + 1;
	epayload->decrypted_data = epayload->payload_data;

	if (!format)
		memcpy(epayload->format, key_format_default, format_len);
	else {
		if (!strcmp(format, key_format_ecryptfs))
			epayload->decrypted_data =
				ecryptfs_get_auth_tok_key((struct ecryptfs_auth_tok *)epayload->payload_data);

		memcpy(epayload->format, format, format_len);
	}

	memcpy(epayload->master_desc, master_desc, strlen(master_desc));
	memcpy(epayload->datalen, datalen, strlen(datalen));
}

/*
 * encrypted_init - initialize an encrypted key
 *
 * For a new key, use a random number for both the iv and data
 * itself.  For an old key, decrypt the hex encoded data.
 */
static int encrypted_init(struct encrypted_key_payload *epayload,
			  const char *key_desc, const char *format,
			  const char *master_desc, const char *datalen,
			  const char *hex_encoded_iv)
{
	int ret = 0;

	if (format && !strcmp(format, key_format_ecryptfs)) {
		ret = valid_ecryptfs_desc(key_desc);
		if (ret < 0)
			return ret;

		ecryptfs_fill_auth_tok((struct ecryptfs_auth_tok *)epayload->payload_data,
				       key_desc);
	}

	__ekey_init(epayload, format, master_desc, datalen);
	if (!hex_encoded_iv) {
		get_random_bytes(epayload->iv, ivsize);

		get_random_bytes(epayload->decrypted_data,
				 epayload->decrypted_datalen);
	} else
		ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv);
	return ret;
}

/*
 * encrypted_instantiate - instantiate an encrypted key
 *
 * Decrypt an existing encrypted datablob or create a new encrypted key
 * based on a kernel random number.
 *
 * On success, return 0. Otherwise return errno.
 */
static int encrypted_instantiate(struct key *key,
				 struct key_preparsed_payload *prep)
{
	struct encrypted_key_payload *epayload = NULL;
	char *datablob = NULL;
	const char *format = NULL;
	char *master_desc = NULL;
	char *decrypted_datalen = NULL;
	char *hex_encoded_iv = NULL;
	size_t datalen = prep->datalen;
	int ret;

	if (datalen <= 0 || datalen > 32767 || !prep->data)
		return -EINVAL;

	/* work on a NUL-terminated copy: datablob_parse mutates it */
	datablob = kmalloc(datalen + 1, GFP_KERNEL);
	if (!datablob)
		return -ENOMEM;
	datablob[datalen] = 0;
	memcpy(datablob, prep->data, datalen);
	ret = datablob_parse(datablob, &format, &master_desc,
			     &decrypted_datalen, &hex_encoded_iv);
	if (ret < 0)
		goto out;

	epayload = encrypted_key_alloc(key, format, master_desc,
				       decrypted_datalen);
	if (IS_ERR(epayload)) {
		ret = PTR_ERR(epayload);
		goto out;
	}
	ret = encrypted_init(epayload, key->description, format, master_desc,
			     decrypted_datalen, hex_encoded_iv);
	if (ret < 0) {
		kzfree(epayload);
		goto out;
	}

	rcu_assign_keypointer(key, epayload);
out:
	kzfree(datablob);
	return ret;
}

/* RCU callback: scrub and free a retired payload after the grace period. */
static void encrypted_rcu_free(struct rcu_head *rcu)
{
	struct encrypted_key_payload *epayload;

	epayload = container_of(rcu, struct encrypted_key_payload, rcu);
	kzfree(epayload);
}

/*
 * encrypted_update - update the master key description
 *
 * Change the master key description for an existing encrypted key.
 * The next read will return an encrypted datablob using the new
 * master key description.
 *
 * On success, return 0. Otherwise return errno.
 */
static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
{
	struct encrypted_key_payload *epayload = key->payload.data[0];
	struct encrypted_key_payload *new_epayload;
	char *buf;
	char *new_master_desc = NULL;
	const char *format = NULL;
	size_t datalen = prep->datalen;
	int ret = 0;

	/* negatively instantiated key has no payload to copy from */
	if (key_is_negative(key))
		return -ENOKEY;
	if (datalen <= 0 || datalen > 32767 || !prep->data)
		return -EINVAL;

	buf = kmalloc(datalen + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	buf[datalen] = 0;
	memcpy(buf, prep->data, datalen);
	ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL);
	if (ret < 0)
		goto out;

	/* key-type prefix ("trusted:"/"user:") must not change */
	ret = valid_master_desc(new_master_desc, epayload->master_desc);
	if (ret < 0)
		goto out;

	new_epayload = encrypted_key_alloc(key, epayload->format,
					   new_master_desc, epayload->datalen);
	if (IS_ERR(new_epayload)) {
		ret = PTR_ERR(new_epayload);
		goto out;
	}

	__ekey_init(new_epayload, epayload->format, new_master_desc,
		    epayload->datalen);

	memcpy(new_epayload->iv, epayload->iv, ivsize);
	memcpy(new_epayload->payload_data, epayload->payload_data,
	       epayload->payload_datalen);

	rcu_assign_keypointer(key, new_epayload);
	call_rcu(&epayload->rcu, encrypted_rcu_free);
out:
	kzfree(buf);
	return ret;
}

/*
 * encrypted_read - format and copy the encrypted data to userspace
 *
 * The resulting datablob format is:
 * <master-key name> <decrypted data length> <encrypted iv> <encrypted data>
 *
 * On success, return to userspace the encrypted key datablob size.
 */
static long encrypted_read(const struct key *key, char __user *buffer,
			   size_t buflen)
{
	struct encrypted_key_payload *epayload;
	struct key *mkey;
	const u8 *master_key;
	size_t master_keylen;
	char derived_key[HASH_SIZE];
	char *ascii_buf;
	size_t asciiblob_len;
	int ret;

	epayload = dereference_key_locked(key);

	/* returns the hex encoded iv, encrypted-data, and hmac as ascii */
	asciiblob_len = epayload->datablob_len + ivsize + 1
	    + roundup(epayload->decrypted_datalen, blksize)
	    + (HASH_SIZE * 2);

	/* standard keyctl_read contract: report needed size when the
	 * caller's buffer is absent or too small */
	if (!buffer || buflen < asciiblob_len)
		return asciiblob_len;

	mkey = request_master_key(epayload, &master_key, &master_keylen);
	if (IS_ERR(mkey))
		return PTR_ERR(mkey);

	ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
	if (ret < 0)
		goto out;

	ret = derived_key_encrypt(epayload, derived_key, sizeof derived_key);
	if (ret < 0)
		goto out;

	ret = datablob_hmac_append(epayload, master_key, master_keylen);
	if (ret < 0)
		goto out;

	ascii_buf = datablob_format(epayload, asciiblob_len);
	if (!ascii_buf) {
		ret = -ENOMEM;
		goto out;
	}

	up_read(&mkey->sem);
	key_put(mkey);
	memzero_explicit(derived_key, sizeof(derived_key));

	if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
		ret = -EFAULT;
	kzfree(ascii_buf);

	return asciiblob_len;
out:
	up_read(&mkey->sem);
	key_put(mkey);
	memzero_explicit(derived_key, sizeof(derived_key));
	return ret;
}

/*
 * encrypted_destroy - clear and free the key's payload
 */
static void encrypted_destroy(struct key *key)
{
	kzfree(key->payload.data[0]);
}

struct key_type key_type_encrypted = {
	.name = "encrypted",
	.instantiate = encrypted_instantiate,
	.update = encrypted_update,
	.destroy = encrypted_destroy,
	.describe = user_describe,
	.read = encrypted_read,
};
EXPORT_SYMBOL_GPL(key_type_encrypted);

/* Module init: allocate the shared sha256 tfm, probe AES sizes, register
 * the "encrypted" key type. */
static int __init init_encrypted(void)
{
	int ret;

	hash_tfm = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		pr_err("encrypted_key: can't allocate %s transform: %ld\n",
		       hash_alg, PTR_ERR(hash_tfm));
		return PTR_ERR(hash_tfm);
	}

	ret = aes_get_sizes();
	if (ret < 0)
		goto out;
	ret = register_key_type(&key_type_encrypted);
	if (ret < 0)
		goto out;
	return 0;
out:
	crypto_free_shash(hash_tfm);
	return ret;
}

static void __exit cleanup_encrypted(void)
{
	crypto_free_shash(hash_tfm);
	unregister_key_type(&key_type_encrypted);
}

late_initcall(init_encrypted);
module_exit(cleanup_encrypted);

MODULE_LICENSE("GPL");
./CrossVul/dataset_final_sorted/CWE-20/c/good_2891_3
crossvul-cpp_data_bad_5185_0
/* LibTomCrypt, modular cryptographic library -- Tom St Denis
 *
 * LibTomCrypt is a library that provides various cryptographic
 * algorithms in a highly modular and flexible manner.
 *
 * The library is free for all purposes without any express
 * guarantee it works.
 *
 * Tom St Denis, tomstdenis@gmail.com, http://libtom.org
 */
#include "tomcrypt.h"

/**
  @file rsa_verify_hash.c
  RSA PKCS #1 v1.5 or v2 PSS signature verification, Tom St Denis and Andreas Lange
*/

#ifdef LTC_MRSA

/**
  PKCS #1 de-sign then v1.5 or PSS depad
  @param sig              The signature data
  @param siglen           The length of the signature data (octets)
  @param hash             The hash of the message that was signed
  @param hashlen          The length of the hash of the message that was signed (octets)
  @param padding          Type of padding (LTC_PKCS_1_PSS or LTC_PKCS_1_V1_5)
  @param hash_idx         The index of the desired hash
  @param saltlen          The length of the salt used during signature
  @param stat             [out] The result of the signature comparison, 1==valid, 0==invalid
  @param key              The public RSA key corresponding to the key that performed the signature
  @return CRYPT_OK on success (even if the signature is invalid; check *stat)
*/
int rsa_verify_hash_ex(const unsigned char *sig,      unsigned long siglen,
                       const unsigned char *hash,     unsigned long hashlen,
                       int            padding,
                       int            hash_idx, unsigned long saltlen,
                       int           *stat,     rsa_key      *key)
{
  unsigned long modulus_bitlen, modulus_bytelen, x;
  int           err;
  unsigned char *tmpbuf;

  LTC_ARGCHK(hash  != NULL);
  LTC_ARGCHK(sig   != NULL);
  LTC_ARGCHK(stat  != NULL);
  LTC_ARGCHK(key   != NULL);

  /* default to invalid */
  *stat = 0;

  /* valid padding? */
  if ((padding != LTC_PKCS_1_V1_5) &&
      (padding != LTC_PKCS_1_PSS)) {
    return CRYPT_PK_INVALID_PADDING;
  }

  if (padding == LTC_PKCS_1_PSS) {
    /* valid hash ? */
    if ((err = hash_is_valid(hash_idx)) != CRYPT_OK) {
       return err;
    }
  }

  /* get modulus len in bits */
  modulus_bitlen = mp_count_bits( (key->N));

  /* the signature must be exactly as long as the modulus */
  modulus_bytelen = mp_unsigned_bin_size( (key->N));
  if (modulus_bytelen != siglen) {
     return CRYPT_INVALID_PACKET;
  }

  /* allocate temp buffer for decoded sig */
  tmpbuf = XMALLOC(siglen);
  if (tmpbuf == NULL) {
     return CRYPT_MEM;
  }

  /* RSA decode it (public-key exponentiation) */
  x = siglen;
  if ((err = ltc_mp.rsa_me(sig, siglen, tmpbuf, &x, PK_PUBLIC, key)) != CRYPT_OK) {
     XFREE(tmpbuf);
     return err;
  }

  /* make sure the output is the right size */
  if (x != siglen) {
     XFREE(tmpbuf);
     return CRYPT_INVALID_PACKET;
  }

  if (padding == LTC_PKCS_1_PSS) {
    /* PSS decode and verify it.  When the modulus bit length is congruent
     * to 1 mod 8 the encoded message has a leading zero octet that must be
     * skipped before depadding. */
    if (modulus_bitlen % 8 == 1) {
      err = pkcs_1_pss_decode(hash, hashlen, tmpbuf + 1, x - 1, saltlen, hash_idx, modulus_bitlen, stat);
    }
    else {
      err = pkcs_1_pss_decode(hash, hashlen, tmpbuf, x, saltlen, hash_idx, modulus_bitlen, stat);
    }
  } else {
    /* PKCS #1 v1.5 decode it */
    unsigned char *out;
    unsigned long outlen, loid[16];
    int           decoded;
    ltc_asn1_list digestinfo[2], siginfo[2];

    /* not all hashes have OIDs... so sad */
    if (hash_descriptor[hash_idx].OIDlen == 0) {
       err = CRYPT_INVALID_ARG;
       goto bail_2;
    }

    /* allocate temp buffer for decoded hash */
    outlen = ((modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0)) - 3;
    out    = XMALLOC(outlen);
    if (out == NULL) {
      err = CRYPT_MEM;
      goto bail_2;
    }

    if ((err = pkcs_1_v1_5_decode(tmpbuf, x, LTC_PKCS_1_EMSA, modulus_bitlen,
                                  out, &outlen, &decoded)) != CRYPT_OK) {
      XFREE(out);
      goto bail_2;
    }

    /* FIX: reject anything that did not decode as a fully well-formed EMSA
     * encoding.  The original code tested only "err" and ignored the
     * "decoded" output flag, so a padding that decoded "unsuccessfully but
     * without error" (decoded == 0) reached the ASN.1/OID comparison below,
     * enabling Bleichenbacher-style signature forgeries against small
     * public exponents. */
    if (decoded != 1) {
      XFREE(out);
      err = CRYPT_INVALID_PACKET;
      goto bail_2;
    }

    /* now we must decode out[0...outlen-1] using ASN.1, test the OID and
     * then test the hash.  Expected structure:
     *   SEQUENCE {
     *      SEQUENCE { hashoid OID
     *                 blah    NULL }
     *      hash OCTET STRING
     *   }
     */
    LTC_SET_ASN1(digestinfo, 0, LTC_ASN1_OBJECT_IDENTIFIER, loid, sizeof(loid)/sizeof(loid[0]));
    LTC_SET_ASN1(digestinfo, 1, LTC_ASN1_NULL,              NULL,                          0);
    LTC_SET_ASN1(siginfo,    0, LTC_ASN1_SEQUENCE,          digestinfo,                    2);
    LTC_SET_ASN1(siginfo,    1, LTC_ASN1_OCTET_STRING,      tmpbuf,                        siglen);

    if ((err = der_decode_sequence(out, outlen, siginfo, 2)) != CRYPT_OK) {
      XFREE(out);
      goto bail_2;
    }

    /* test OID and digest; only set *stat when everything matches exactly */
    if ((digestinfo[0].size == hash_descriptor[hash_idx].OIDlen) &&
        (XMEMCMP(digestinfo[0].data, hash_descriptor[hash_idx].OID,
                 sizeof(unsigned long) * hash_descriptor[hash_idx].OIDlen) == 0) &&
        (siginfo[1].size == hashlen) &&
        (XMEMCMP(siginfo[1].data, hash, hashlen) == 0)) {
       *stat = 1;
    }

#ifdef LTC_CLEAN_STACK
    zeromem(out, outlen);
#endif
    XFREE(out);
  }

bail_2:
#ifdef LTC_CLEAN_STACK
  zeromem(tmpbuf, siglen);
#endif
  XFREE(tmpbuf);
  return err;
}

#endif /* LTC_MRSA */

/* $Source$ */
/* $Revision$ */
/* $Date$ */
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5185_0
crossvul-cpp_data_good_2313_0
/* * Copyright (c) 2007, 2008, 2009 Andrea Bittau <a.bittau@cs.ucl.ac.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <sys/types.h> #include <sys/socket.h> #include <sys/param.h> #include <stdio.h> #include <stdlib.h> #include <err.h> #include <unistd.h> #include <string.h> #include <arpa/inet.h> #include <netinet/in.h> #include <netinet/in_systm.h> #include <netinet/ip.h> #include <netinet/udp.h> #include <assert.h> #include <grp.h> #include <sys/utsname.h> #include "easside.h" #include "version.h" extern char * getVersion(char * progname, int maj, int min, int submin, int svnrev, int beta, int rc); unsigned char ids[8192]; unsigned short last_id; int wrap; int is_dup(unsigned short id) { int idx = id/8; int bit = id % 8; unsigned char mask = (1 << bit); if (ids[idx] & mask) return 1; ids[idx] |= mask; return 0; } int handle(int s, unsigned char* data, int len, struct sockaddr_in *s_in) { char buf[2048]; unsigned short *cmd = (unsigned short *)buf; int plen; struct in_addr *addr = &s_in->sin_addr; unsigned short *pid = (unsigned short*) data; /* inet check */ if (len == S_HELLO_LEN && memcmp(data, "sorbo", 5) == 0) { unsigned short *id = (unsigned short*) (data+5); int x = 2+4+2; *cmd = htons(S_CMD_INET_CHECK); memcpy(cmd+1, addr, 4); memcpy(cmd+1+2, id, 2); printf("Inet check by %s %d\n", inet_ntoa(*addr), 
ntohs(*id)); if (send(s, buf, x, 0) != x) return 1; return 0; } *cmd++ = htons(S_CMD_PACKET); *cmd++ = *pid; plen = len - 2; if (plen < 0) return 0; last_id = ntohs(*pid); if (last_id > 20000) wrap = 1; if (wrap && last_id < 100) { wrap = 0; memset(ids, 0, sizeof(ids)); } printf("Got packet %d %d", last_id, plen); if (is_dup(last_id)) { printf(" (DUP)\n"); return 0; } printf("\n"); *cmd++ = htons(plen); memcpy(cmd, data+2, plen); plen += 2 + 2 + 2; assert(plen <= (int) sizeof(buf)); if (send(s, buf, plen, 0) != plen) return 1; return 0; } void handle_dude(int dude, int udp) { unsigned char buf[2048]; int rc; fd_set rfds; int maxfd; struct sockaddr_in s_in; socklen_t len; /* handshake */ rc = recv(dude, buf, 5, 0); if (rc != 5) { close(dude); return; } if (memcmp(buf, "sorbo", 5) != 0) { close(dude); return; } if (send(dude, "sorbox", 6, 0) != 6) { close(dude); return; } printf("Handshake complete\n"); memset(ids, 0, sizeof(ids)); last_id = 0; wrap = 0; while (1) { FD_ZERO(&rfds); FD_SET(udp, &rfds); FD_SET(dude, &rfds); if (dude > udp) maxfd = dude; else maxfd = udp; if (select(maxfd+1, &rfds, NULL, NULL, NULL) == -1) err(1, "select()"); if (FD_ISSET(dude, &rfds)) break; if (!FD_ISSET(udp, &rfds)) continue; len = sizeof(s_in); rc = recvfrom(udp, buf, sizeof(buf), 0, (struct sockaddr*) &s_in, &len); if (rc == -1) err(1, "read()"); if (handle(dude, buf, rc, &s_in)) break; } close(dude); } void drop_privs() { if (chroot(".") == -1) err(1, "chroot()"); if (setgroups(0, NULL) == -1) err(1, "setgroups()"); if (setgid(69) == -1) err(1, "setgid()"); if (setuid(69) == -1) err(1, "setuid()"); } void usage() { printf("\n" " %s - (C) 2007,2008 Andrea Bittau\n" " http://www.aircrack-ng.org\n" "\n" " Usage: buddy-ng <options>\n" "\n" " Options:\n" "\n" " -h : This help screen\n" " -p : Don't drop privileges\n" "\n", getVersion("Buddy-ng", _MAJ, _MIN, _SUB_MIN, _REVISION, _BETA, _RC)); exit(1); } int main(int argc, char *argv[]) { struct utsname utsName; struct sockaddr_in s_in; 
struct sockaddr_in dude_sin; int len, udp, ch, dude, s; int port = S_DEFAULT_PORT; int drop; while ((ch = getopt(argc, argv, "ph")) != -1) { switch (ch) { case 'p': drop = 0; break; default: case 'h': usage(); break; } } memset(&s_in, 0, sizeof(s_in)); s_in.sin_family = PF_INET; s_in.sin_addr.s_addr = INADDR_ANY; s_in.sin_port = htons(S_DEFAULT_UDP_PORT); udp = socket(s_in.sin_family, SOCK_DGRAM, IPPROTO_UDP); if (udp == -1) err(1, "socket(UDP)"); if (bind(udp, (struct sockaddr*) &s_in, sizeof(s_in)) == -1) err(1, "bind()"); s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); if (s == -1) err(1, "socket(TCP)"); drop = 1; // Do not drop privileges on Windows (doing it fails). if (uname(&utsName) == 0) { drop = strncasecmp(utsName.sysname, "cygwin", 6); } if (drop) drop_privs(); memset(&s_in, 0, sizeof(s_in)); s_in.sin_family = PF_INET; s_in.sin_port = htons(port); s_in.sin_addr.s_addr = INADDR_ANY; len = 1; if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &len, sizeof(len)) == -1) err(1, "setsockopt(SO_REUSEADDR)"); if (bind(s, (struct sockaddr*) &s_in, sizeof(s_in)) == -1) err(1, "bind()"); if (listen(s, 5) == -1) err(1, "listen()"); while (1) { len = sizeof(dude_sin); printf("Waiting for connexion\n"); dude = accept(s, (struct sockaddr*) &dude_sin, (socklen_t*) &len); if (dude == -1) err(1, "accept()"); printf("Got connection from %s\n", inet_ntoa(dude_sin.sin_addr)); handle_dude(dude, udp); printf("That was it\n"); } exit(0); }
./CrossVul/dataset_final_sorted/CWE-20/c/good_2313_0
crossvul-cpp_data_bad_3547_1
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net) * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi) */ #include <linux/capability.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/stat.h> #include <net/net_namespace.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <asm/system.h> #include <asm/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/rose.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/tcp_states.h> #include <net/ip.h> #include <net/arp.h> static int rose_ndevs = 10; int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0; int sysctl_rose_call_request_timeout = ROSE_DEFAULT_T1; int sysctl_rose_reset_request_timeout = ROSE_DEFAULT_T2; int sysctl_rose_clear_request_timeout = ROSE_DEFAULT_T3; int sysctl_rose_no_activity_timeout = ROSE_DEFAULT_IDLE; int sysctl_rose_ack_hold_back_timeout = ROSE_DEFAULT_HB; int sysctl_rose_routing_control = ROSE_DEFAULT_ROUTING; int sysctl_rose_link_fail_timeout = ROSE_DEFAULT_FAIL_TIMEOUT; int sysctl_rose_maximum_vcs = ROSE_DEFAULT_MAXVC; int sysctl_rose_window_size 
= ROSE_DEFAULT_WINDOW_SIZE; static HLIST_HEAD(rose_list); static DEFINE_SPINLOCK(rose_list_lock); static const struct proto_ops rose_proto_ops; ax25_address rose_callsign; /* * ROSE network devices are virtual network devices encapsulating ROSE * frames into AX.25 which will be sent through an AX.25 device, so form a * special "super class" of normal net devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key rose_netdev_xmit_lock_key; static struct lock_class_key rose_netdev_addr_lock_key; static void rose_set_lockdep_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key); } static void rose_set_lockdep_key(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL); } /* * Convert a ROSE address into text. */ char *rose2asc(char *buf, const rose_address *addr) { if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 && addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 && addr->rose_addr[4] == 0x00) { strcpy(buf, "*"); } else { sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF, addr->rose_addr[1] & 0xFF, addr->rose_addr[2] & 0xFF, addr->rose_addr[3] & 0xFF, addr->rose_addr[4] & 0xFF); } return buf; } /* * Compare two ROSE addresses, 0 == equal. */ int rosecmp(rose_address *addr1, rose_address *addr2) { int i; for (i = 0; i < 5; i++) if (addr1->rose_addr[i] != addr2->rose_addr[i]) return 1; return 0; } /* * Compare two ROSE addresses for only mask digits, 0 == equal. 
*/ int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask) { unsigned int i, j; if (mask > 10) return 1; for (i = 0; i < mask; i++) { j = i / 2; if ((i % 2) != 0) { if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F)) return 1; } else { if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0)) return 1; } } return 0; } /* * Socket removal during an interrupt is now safe. */ static void rose_remove_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_del_node_init(sk); spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a broken link layer connection to a * particular neighbour. */ void rose_kill_by_neigh(struct rose_neigh *neigh) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->neighbour == neigh) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->neighbour = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void rose_kill_by_device(struct net_device *dev) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->device == dev) { rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0); rose->neighbour->use--; rose->device = NULL; } } spin_unlock_bh(&rose_list_lock); } /* * Handle device status changes. */ static int rose_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event != NETDEV_DOWN) return NOTIFY_DONE; switch (dev->type) { case ARPHRD_ROSE: rose_kill_by_device(dev); break; case ARPHRD_AX25: rose_link_device_down(dev); rose_rt_device_down(dev); break; } return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. 
*/ static void rose_insert_socket(struct sock *sk) { spin_lock_bh(&rose_list_lock); sk_add_node(sk, &rose_list); spin_unlock_bh(&rose_list_lock); } /* * Find a socket that wants to accept the Call Request we just * received. */ static struct sock *rose_find_listener(rose_address *addr, ax25_address *call) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, call) && !rose->source_ndigis && s->sk_state == TCP_LISTEN) goto found; } sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (!rosecmp(&rose->source_addr, addr) && !ax25cmp(&rose->source_call, &null_ax25_address) && s->sk_state == TCP_LISTEN) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a connected ROSE socket given my LCI and device. */ struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh) { struct sock *s; struct hlist_node *node; spin_lock_bh(&rose_list_lock); sk_for_each(s, node, &rose_list) { struct rose_sock *rose = rose_sk(s); if (rose->lci == lci && rose->neighbour == neigh) goto found; } s = NULL; found: spin_unlock_bh(&rose_list_lock); return s; } /* * Find a unique LCI for a given device. */ unsigned int rose_new_lci(struct rose_neigh *neigh) { int lci; if (neigh->dce_mode) { for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } else { for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--) if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL) return lci; } return 0; } /* * Deferred destroy. */ void rose_destroy_socket(struct sock *); /* * Handler for deferred kills. */ static void rose_destroy_timer(unsigned long data) { rose_destroy_socket((struct sock *)data); } /* * This is called from user mode and the timers. 
Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. */ void rose_destroy_socket(struct sock *sk) { struct sk_buff *skb; rose_remove_socket(sk); rose_stop_heartbeat(sk); rose_stop_idletimer(sk); rose_stop_timer(sk); rose_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* Queue the unaccepted socket for death */ sock_set_flag(skb->sk, SOCK_DEAD); rose_start_heartbeat(skb->sk); rose_sk(skb->sk)->state = ROSE_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ setup_timer(&sk->sk_timer, rose_destroy_timer, (unsigned long)sk); sk->sk_timer.expires = jiffies + 10 * HZ; add_timer(&sk->sk_timer); } else sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * ROSE socket object. */ static int rose_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int opt; if (level != SOL_ROSE) return -ENOPROTOOPT; if (optlen < sizeof(int)) return -EINVAL; if (get_user(opt, (int __user *)optval)) return -EFAULT; switch (optname) { case ROSE_DEFER: rose->defer = opt ? 1 : 0; return 0; case ROSE_T1: if (opt < 1) return -EINVAL; rose->t1 = opt * HZ; return 0; case ROSE_T2: if (opt < 1) return -EINVAL; rose->t2 = opt * HZ; return 0; case ROSE_T3: if (opt < 1) return -EINVAL; rose->t3 = opt * HZ; return 0; case ROSE_HOLDBACK: if (opt < 1) return -EINVAL; rose->hb = opt * HZ; return 0; case ROSE_IDLE: if (opt < 0) return -EINVAL; rose->idle = opt * 60 * HZ; return 0; case ROSE_QBITINCL: rose->qbitincl = opt ? 
1 : 0; return 0; default: return -ENOPROTOOPT; } } static int rose_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int val = 0; int len; if (level != SOL_ROSE) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case ROSE_DEFER: val = rose->defer; break; case ROSE_T1: val = rose->t1 / HZ; break; case ROSE_T2: val = rose->t2 / HZ; break; case ROSE_T3: val = rose->t3 / HZ; break; case ROSE_HOLDBACK: val = rose->hb / HZ; break; case ROSE_IDLE: val = rose->idle / (60 * HZ); break; case ROSE_QBITINCL: val = rose->qbitincl; break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; return copy_to_user(optval, &val, len) ? -EFAULT : 0; } static int rose_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; if (sk->sk_state != TCP_LISTEN) { struct rose_sock *rose = rose_sk(sk); rose->dest_ndigis = 0; memset(&rose->dest_addr, 0, ROSE_ADDR_LEN); memset(&rose->dest_call, 0, AX25_ADDR_LEN); memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; return 0; } return -EOPNOTSUPP; } static struct proto rose_proto = { .name = "ROSE", .owner = THIS_MODULE, .obj_size = sizeof(struct rose_sock), }; static int rose_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct rose_sock *rose; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return -ENOMEM; rose = rose_sk(sk); sock_init_data(sock, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sock->ops = &rose_proto_ops; sk->sk_protocol = protocol; 
init_timer(&rose->timer); init_timer(&rose->idletimer); rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout); rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout); rose->t3 = msecs_to_jiffies(sysctl_rose_clear_request_timeout); rose->hb = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout); rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout); rose->state = ROSE_STATE_0; return 0; } static struct sock *rose_make_new(struct sock *osk) { struct sock *sk; struct rose_sock *rose, *orose; if (osk->sk_type != SOCK_SEQPACKET) return NULL; sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto); if (sk == NULL) return NULL; rose = rose_sk(sk); sock_init_data(NULL, sk); skb_queue_head_init(&rose->ack_queue); #ifdef M_BIT skb_queue_head_init(&rose->frag_queue); rose->fraglen = 0; #endif sk->sk_type = osk->sk_type; sk->sk_priority = osk->sk_priority; sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); init_timer(&rose->timer); init_timer(&rose->idletimer); orose = rose_sk(osk); rose->t1 = orose->t1; rose->t2 = orose->t2; rose->t3 = orose->t3; rose->hb = orose->hb; rose->idle = orose->idle; rose->defer = orose->defer; rose->device = orose->device; rose->qbitincl = orose->qbitincl; return sk; } static int rose_release(struct socket *sock) { struct sock *sk = sock->sk; struct rose_sock *rose; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); rose = rose_sk(sk); switch (rose->state) { case ROSE_STATE_0: release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_2: rose->neighbour->use--; release_sock(sk); rose_disconnect(sk, 0, -1, -1); lock_sock(sk); rose_destroy_socket(sk); break; case ROSE_STATE_1: case ROSE_STATE_3: case ROSE_STATE_4: case ROSE_STATE_5: rose_clear_queues(sk); rose_stop_idletimer(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); 
rose_start_t3timer(sk); rose->state = ROSE_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; struct net_device *dev; ax25_address *source; ax25_uid_assoc *user; int n; if (!sock_flag(sk, SOCK_ZAPPED)) return -EINVAL; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n"); return -EADDRNOTAVAIL; } source = &addr->srose_call; user = ax25_findbyuid(current_euid()); if (user) { rose->source_call = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; rose->source_call = *source; } rose->source_addr = addr->srose_addr; rose->device = dev; rose->source_ndigis = addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->source_digis[n] = full_addr->srose_digis[n]; } else { if (rose->source_ndigis == 1) { rose->source_digis[0] = addr->srose_digi; } } rose_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); SOCK_DEBUG(sk, "ROSE: socket is bound\n"); return 0; } static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = 
rose_sk(sk); struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr; unsigned char cause, diagnostic; struct net_device *dev; ax25_uid_assoc *user; int n, err = 0; if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; if (addr->srose_family != AF_ROSE) return -EINVAL; if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS) return -EINVAL; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { /* Connect completed during a ERESTARTSYS event */ sock->state = SS_CONNECTED; goto out_release; } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } if (sk->sk_state == TCP_ESTABLISHED) { /* No reconnect on a seqpacket socket */ err = -EISCONN; goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0); if (!rose->neighbour) { err = -ENETUNREACH; goto out_release; } rose->lci = rose_new_lci(rose->neighbour); if (!rose->lci) { err = -ENETUNREACH; goto out_release; } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ sock_reset_flag(sk, SOCK_ZAPPED); if ((dev = rose_dev_first()) == NULL) { err = -ENETUNREACH; goto out_release; } user = ax25_findbyuid(current_euid()); if (!user) { err = -EINVAL; goto out_release; } memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN); rose->source_call = user->call; rose->device = dev; ax25_uid_put(user); rose_insert_socket(sk); /* Finish the bind */ } rose->dest_addr = addr->srose_addr; rose->dest_call = addr->srose_call; rose->rand = ((long)rose & 0xFFFF) + rose->lci; rose->dest_ndigis = 
addr->srose_ndigis; if (addr_len == sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr; for (n = 0 ; n < addr->srose_ndigis ; n++) rose->dest_digis[n] = full_addr->srose_digis[n]; } else { if (rose->dest_ndigis == 1) { rose->dest_digis[0] = addr->srose_digi; } } /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; rose->state = ROSE_STATE_1; rose->neighbour->use++; rose_write_internal(sk, ROSE_CALL_REQUEST); rose_start_heartbeat(sk); rose_start_t1timer(sk); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } /* * A Connect Ack with Choke or timeout or failed routing will go to * closed. */ if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; out_release: release_sock(sk); return err; } static int rose_accept(struct socket *sock, struct socket *newsock, int flags) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out_release; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out_release; } /* * The write queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; if (flags & O_NONBLOCK) { err = 
-EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ skb->sk = NULL; kfree_skb(skb); sk->sk_ack_backlog--; out_release: release_sock(sk); return err; } static int rose_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr; struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); int n; memset(srose, 0, sizeof(*srose)); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; for (n = 0; n < rose->dest_ndigis; n++) srose->srose_digis[n] = rose->dest_digis[n]; } else { srose->srose_family = AF_ROSE; srose->srose_addr = rose->source_addr; srose->srose_call = rose->source_call; srose->srose_ndigis = rose->source_ndigis; for (n = 0; n < rose->source_ndigis; n++) srose->srose_digis[n] = rose->source_digis[n]; } *uaddr_len = sizeof(struct full_sockaddr_rose); return 0; } int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci) { struct sock *sk; struct sock *make; struct rose_sock *make_rose; struct rose_facilities_struct facilities; int n, len; skb->sk = NULL; /* Initially we don't know who it's for */ /* * skb->data points to the rose frame start */ memset(&facilities, 0x00, sizeof(struct rose_facilities_struct)); len = (((skb->data[3] >> 4) & 0x0F) + 1) >> 1; len += (((skb->data[3] >> 0) & 0x0F) + 1) >> 1; if (!rose_parse_facilities(skb->data + len + 4, &facilities)) { rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76); return 0; } sk = rose_find_listener(&facilities.source_addr, &facilities.source_call); /* * 
We can't accept the Call Request. */
	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = rose_make_new(sk)) == NULL) {
		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		return 0;
	}

	skb->sk     = make;
	make->sk_state = TCP_ESTABLISHED;

	/* Populate the new (child) socket from the decoded call facilities. */
	make_rose = rose_sk(make);
	make_rose->lci           = lci;
	make_rose->dest_addr     = facilities.dest_addr;
	make_rose->dest_call     = facilities.dest_call;
	make_rose->dest_ndigis   = facilities.dest_ndigis;
	for (n = 0 ; n < facilities.dest_ndigis ; n++)
		make_rose->dest_digis[n] = facilities.dest_digis[n];
	make_rose->source_addr   = facilities.source_addr;
	make_rose->source_call   = facilities.source_call;
	make_rose->source_ndigis = facilities.source_ndigis;
	for (n = 0 ; n < facilities.source_ndigis ; n++)
		make_rose->source_digis[n]= facilities.source_digis[n];
	make_rose->neighbour     = neigh;
	make_rose->device        = dev;
	make_rose->facilities    = facilities;

	make_rose->neighbour->use++;

	/* With the "defer" socket option set, the call is parked in state 5
	 * until userspace explicitly accepts it (SIOCRSACCEPT); otherwise
	 * accept immediately and go to the data-transfer state. */
	if (rose_sk(sk)->defer) {
		make_rose->state = ROSE_STATE_5;
	} else {
		rose_write_internal(make, ROSE_CALL_ACCEPTED);
		make_rose->state = ROSE_STATE_3;
		rose_start_idletimer(make);
	}

	/* Reset the flow-control window variables for the new circuit. */
	make_rose->condition = 0x00;
	make_rose->vs        = 0;
	make_rose->va        = 0;
	make_rose->vr        = 0;
	make_rose->vl        = 0;
	sk->sk_ack_backlog++;

	rose_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	rose_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);

	return 1;
}

/*
 * Queue one outgoing data packet on a connected ROSE socket.
 * Validates the optional destination address against the connected peer
 * (ROSE is connection oriented, so a mismatching address is -EISCONN),
 * copies the user data into an skb, prepends the 3-byte ROSE network
 * header and hands the result to rose_kick() for transmission.
 */
static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *usrose = (struct sockaddr_rose *)msg->msg_name;
	int err;
	struct full_sockaddr_rose srose;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int n, size, qbit = 0;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		return -EPIPE;
	}

	if (rose->neighbour == NULL || rose->device == NULL)
		return -ENETUNREACH;

	if (usrose != NULL) {
		/* Caller supplied an address: only the two known sockaddr
		 * sizes are accepted, and the address must exactly match
		 * the connected peer, digipeaters included. */
		if (msg->msg_namelen != sizeof(struct sockaddr_rose) &&
		    msg->msg_namelen != sizeof(struct full_sockaddr_rose))
			return -EINVAL;
		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
		memcpy(&srose, usrose, msg->msg_namelen);
		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
			return -EISCONN;
		if (srose.srose_ndigis != rose->dest_ndigis)
			return -EISCONN;
		if (srose.srose_ndigis == rose->dest_ndigis) {
			for (n = 0 ; n < srose.srose_ndigis ; n++)
				if (ax25cmp(&rose->dest_digis[n],
					    &srose.srose_digis[n]))
					return -EISCONN;
		}
		if (srose.srose_family != AF_ROSE)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		/* No address given: rebuild one from the connected peer. */
		srose.srose_family = AF_ROSE;
		srose.srose_addr   = rose->dest_addr;
		srose.srose_call   = rose->dest_call;
		srose.srose_ndigis = rose->dest_ndigis;
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			srose.srose_digis[n] = rose->dest_digis[n];
	}

	SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");

	/* Build a packet */
	SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");

	/* Sanity check the packet size */
	if (len > 65535)
		return -EMSGSIZE;

	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;

	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);

	/*
	 * Put the data on the end
	 */
	SOCK_DEBUG(sk, "ROSE: Appending user data\n");

	skb_reset_transport_header(skb);
	skb_put(skb, len);

	err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/*
	 * If the Q BIT Include socket option is in force, the first
	 * byte of the user data is the logical value of the Q Bit.
	 * NOTE(review): with qbitincl set and len == 0 this reads and
	 * pulls a byte the user never wrote -- confirm a zero-length
	 * send cannot reach this point.
	 */
	if (rose->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 * Push down the ROSE header
	 */
	asmptr = skb_push(skb, ROSE_MIN_LEN);

	SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");

	/* Build a ROSE Network header */
	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
	asmptr[1] = (rose->lci >> 0) & 0xFF;
	asmptr[2] = ROSE_DATA;

	if (qbit)
		asmptr[0] |= ROSE_Q_BIT;

	SOCK_DEBUG(sk, "ROSE: Built header.\n");

	SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");

	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ENOTCONN;
	}

#ifdef M_BIT
	/* Dead code unless M_BIT is defined: fragment oversized packets
	 * and chain them with the More-data bit set.  NOTE(review): the
	 * skbn->free / skbn->arp fields below are ancient sk_buff members
	 * and would not compile on this kernel -- confirm before enabling. */
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
		unsigned char header[ROSE_MIN_LEN];
		struct sk_buff *skbn;
		int frontlen;
		int lg;

		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
		skb_pull(skb, ROSE_MIN_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
				kfree_skb(skb);
				return err;
			}

			skbn->sk   = sk;
			skbn->free = 1;
			skbn->arp  = 1;

			skb_reserve(skbn, frontlen);

			lg = (ROSE_PACLEN > skb->len) ?
skb->len : ROSE_PACLEN; /* Copy the user data */ skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg); skb_pull(skb, lg); /* Duplicate the Header */ skb_push(skbn, ROSE_MIN_LEN); skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN); if (skb->len > 0) skbn->data[2] |= M_BIT; skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ } skb->free = 1; kfree_skb(skb); } else { skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ } #else skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */ #endif rose_kick(sk); return len; } static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name; size_t copied; unsigned char *asmptr; struct sk_buff *skb; int n, er, qbit; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; /* Now we can treat all alike */ if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) return er; qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT; skb_pull(skb, ROSE_MIN_LEN); if (rose->qbitincl) { asmptr = skb_push(skb, 1); *asmptr = qbit; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (srose != NULL) { srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; srose->srose_ndigis = rose->dest_ndigis; if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; for (n = 0 ; n < rose->dest_ndigis ; n++) full_srose->srose_digis[n] = rose->dest_digis[n]; msg->msg_namelen = sizeof(struct full_sockaddr_rose); } else { if 
(rose->dest_ndigis >= 1) { srose->srose_ndigis = 1; srose->srose_digi = rose->dest_digis[0]; } msg->msg_namelen = sizeof(struct sockaddr_rose); } } skb_free_datagram(sk, skb); return copied; } static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); void __user *argp = (void __user *)arg; switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; return put_user(amount, (unsigned int __user *) argp); } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; return put_user(amount, (unsigned int __user *) argp); } case SIOCGSTAMP: return sock_get_timestamp(sk, (struct timeval __user *) argp); case SIOCGSTAMPNS: return sock_get_timestampns(sk, (struct timespec __user *) argp); case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: return -EINVAL; case SIOCADDRT: case SIOCDELRT: case SIOCRSCLRRT: if (!capable(CAP_NET_ADMIN)) return -EPERM; return rose_rt_ioctl(cmd, argp); case SIOCRSGCAUSE: { struct rose_cause_struct rose_cause; rose_cause.cause = rose->cause; rose_cause.diagnostic = rose->diagnostic; return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? 
		       -EFAULT : 0;
	}

	case SIOCRSSCAUSE: {
		struct rose_cause_struct rose_cause;
		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
			return -EFAULT;
		rose->cause      = rose_cause.cause;
		rose->diagnostic = rose_cause.diagnostic;
		return 0;
	}

	case SIOCRSSL2CALL:
		/* Replace the global L2 callsign: drop the old AX.25
		 * listener (if any), copy the new callsign from userspace
		 * and register it unless it is the null address. */
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_release(&rose_callsign, NULL);
		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
			return -EFAULT;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			return ax25_listen_register(&rose_callsign, NULL);
		return 0;

	case SIOCRSGL2CALL:
		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ?
			-EFAULT : 0;

	case SIOCRSACCEPT:
		/* Accept a call that was deferred in state 5 (see the
		 * "defer" socket option in the call-request handler). */
		if (rose->state == ROSE_STATE_5) {
			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
			rose_start_idletimer(sk);
			rose->condition = 0x00;
			rose->vs        = 0;
			rose->va        = 0;
			rose->vr        = 0;
			rose->vl        = 0;
			rose->state     = ROSE_STATE_3;
		}
		return 0;

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

#ifdef CONFIG_PROC_FS
/* seq_file iterator over all ROSE sockets; holds rose_list_lock for the
 * whole traversal (start..stop). */
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
	__acquires(rose_list_lock)
{
	spin_lock_bh(&rose_list_lock);
	return seq_hlist_start_head(&rose_list, *pos);
}

static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &rose_list, pos);
}

static void rose_info_stop(struct seq_file *seq, void *v)
	__releases(rose_list_lock)
{
	spin_unlock_bh(&rose_list_lock);
}

/* Render one /proc/net/rose row (or the column header). */
static int rose_info_show(struct seq_file *seq, void *v)
{
	char buf[11], rsbuf[11];

	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
	else {
		struct sock *s = sk_entry(v);
		struct rose_sock *rose = rose_sk(s);
		const char *devname, *callsign;
		const struct net_device *dev = rose->device;

		if (!dev)
			devname = "???";
		else
			devname = dev->name;

		seq_printf(seq, "%-10s %-9s ",
			   rose2asc(rsbuf, &rose->dest_addr),
			   ax2asc(buf, &rose->dest_call));

		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
			callsign = "??????-?";
		else
			callsign = ax2asc(buf, &rose->source_call);

		seq_printf(seq,
			   "%-10s %-9s %-5s %3.3X %05d %d %d %d %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
			rose2asc(rsbuf, &rose->source_addr),
			callsign,
			devname,
			rose->lci & 0x0FFF,
			(rose->neighbour) ? rose->neighbour->number : 0,
			rose->state,
			rose->vs,
			rose->vr,
			rose->va,
			ax25_display_timer(&rose->timer) / HZ,
			rose->t1 / HZ,
			rose->t2 / HZ,
			rose->t3 / HZ,
			rose->hb / HZ,
			ax25_display_timer(&rose->idletimer) / (60 * HZ),
			rose->idle / (60 * HZ),
			sk_wmem_alloc_get(s),
			sk_rmem_alloc_get(s),
			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
	}

	return 0;
}

static const struct seq_operations rose_info_seqops = {
	.start = rose_info_start,
	.next = rose_info_next,
	.stop = rose_info_stop,
	.show = rose_info_show,
};

static int rose_info_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rose_info_seqops);
}

static const struct file_operations rose_info_fops = {
	.owner = THIS_MODULE,
	.open = rose_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
#endif	/* CONFIG_PROC_FS */

/* PF_ROSE socket-family registration. */
static const struct net_proto_family rose_family_ops = {
	.family		= PF_ROSE,
	.create		= rose_create,
	.owner		= THIS_MODULE,
};

/* The socket-layer operations table for ROSE sockets. */
static const struct proto_ops rose_proto_ops = {
	.family		= PF_ROSE,
	.owner		= THIS_MODULE,
	.release	= rose_release,
	.bind		= rose_bind,
	.connect	= rose_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= rose_accept,
	.getname	= rose_getname,
	.poll		= datagram_poll,
	.ioctl		= rose_ioctl,
	.listen		= rose_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= rose_setsockopt,
	.getsockopt	= rose_getsockopt,
	.sendmsg	= rose_sendmsg,
	.recvmsg	= rose_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct notifier_block rose_dev_notifier = {
	.notifier_call	= rose_device_event,
};

/* Array of the rose%d net devices created at module init. */
static struct net_device **dev_rose;

static struct ax25_protocol rose_pid = {
	.pid	= AX25_P_ROSE,
	.func	= rose_route_frame
};

static struct ax25_linkfail
rose_linkfail_notifier = { .func = rose_link_failed }; static int __init rose_proto_init(void) { int i; int rc; if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) { printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); rc = -EINVAL; goto out; } rc = proto_register(&rose_proto, 0); if (rc != 0) goto out; rose_callsign = null_ax25_address; dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_rose == NULL) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); rc = -ENOMEM; goto out_proto_unregister; } for (i = 0; i < rose_ndevs; i++) { struct net_device *dev; char name[IFNAMSIZ]; sprintf(name, "rose%d", i); dev = alloc_netdev(0, name, rose_setup); if (!dev) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); rc = -ENOMEM; goto fail; } rc = register_netdev(dev); if (rc) { printk(KERN_ERR "ROSE: netdevice registration failed\n"); free_netdev(dev); goto fail; } rose_set_lockdep_key(dev); dev_rose[i] = dev; } sock_register(&rose_family_ops); register_netdevice_notifier(&rose_dev_notifier); ax25_register_pid(&rose_pid); ax25_linkfail_register(&rose_linkfail_notifier); #ifdef CONFIG_SYSCTL rose_register_sysctl(); #endif rose_loopback_init(); rose_add_loopback_neigh(); proc_net_fops_create(&init_net, "rose", S_IRUGO, &rose_info_fops); proc_net_fops_create(&init_net, "rose_neigh", S_IRUGO, &rose_neigh_fops); proc_net_fops_create(&init_net, "rose_nodes", S_IRUGO, &rose_nodes_fops); proc_net_fops_create(&init_net, "rose_routes", S_IRUGO, &rose_routes_fops); out: return rc; fail: while (--i >= 0) { unregister_netdev(dev_rose[i]); free_netdev(dev_rose[i]); } kfree(dev_rose); out_proto_unregister: proto_unregister(&rose_proto); goto out; } module_init(rose_proto_init); module_param(rose_ndevs, int, 0); MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices"); MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The amateur radio ROSE network 
layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_ROSE); static void __exit rose_exit(void) { int i; proc_net_remove(&init_net, "rose"); proc_net_remove(&init_net, "rose_neigh"); proc_net_remove(&init_net, "rose_nodes"); proc_net_remove(&init_net, "rose_routes"); rose_loopback_clear(); rose_rt_free(); ax25_protocol_release(AX25_P_ROSE); ax25_linkfail_release(&rose_linkfail_notifier); if (ax25cmp(&rose_callsign, &null_ax25_address) != 0) ax25_listen_release(&rose_callsign, NULL); #ifdef CONFIG_SYSCTL rose_unregister_sysctl(); #endif unregister_netdevice_notifier(&rose_dev_notifier); sock_unregister(PF_ROSE); for (i = 0; i < rose_ndevs; i++) { struct net_device *dev = dev_rose[i]; if (dev) { unregister_netdev(dev); free_netdev(dev); } } kfree(dev_rose); proto_unregister(&rose_proto); } module_exit(rose_exit);
/*
 * --- dataset artifact: boundary between two concatenated source files ---
 * Everything above this comment comes from the Linux kernel's
 * net/rose/af_rose.c; everything below is the start of Mono's
 * metadata/reflection.c.  Original artifact markers preserved verbatim:
 *   ./CrossVul/dataset_final_sorted/CWE-20/c/bad_3547_1
 *   crossvul-cpp_data_good_4718_0
 */
/* * reflection.c: Routines for creating an image at runtime. * * Author: * Paolo Molaro (lupus@ximian.com) * * Copyright 2001-2003 Ximian, Inc (http://www.ximian.com) * Copyright 2004-2009 Novell, Inc (http://www.novell.com) * */ #include <config.h> #include "mono/utils/mono-digest.h" #include "mono/utils/mono-membar.h" #include "mono/metadata/reflection.h" #include "mono/metadata/tabledefs.h" #include "mono/metadata/metadata-internals.h" #include <mono/metadata/profiler-private.h> #include "mono/metadata/class-internals.h" #include "mono/metadata/gc-internal.h" #include "mono/metadata/tokentype.h" #include "mono/metadata/domain-internals.h" #include "mono/metadata/opcodes.h" #include "mono/metadata/assembly.h" #include "mono/metadata/object-internals.h" #include <mono/metadata/exception.h> #include <mono/metadata/marshal.h> #include <mono/metadata/security-manager.h> #include <stdio.h> #include <glib.h> #include <errno.h> #include <time.h> #include <string.h> #include <ctype.h> #include "image.h" #include "cil-coff.h" #include "mono-endian.h" #include <mono/metadata/gc-internal.h> #include <mono/metadata/mempool-internals.h> #include <mono/metadata/security-core-clr.h> #include <mono/metadata/debug-helpers.h> #include <mono/metadata/verify-internals.h> #include <mono/metadata/mono-ptr-array.h> #include <mono/utils/mono-string.h> #include <mono/utils/mono-error-internals.h> #if HAVE_SGEN_GC static void* reflection_info_desc = NULL; #define MOVING_GC_REGISTER(addr) do { \ if (!reflection_info_desc) { \ gsize bmap = 1; \ reflection_info_desc = mono_gc_make_descr_from_bitmap (&bmap, 1); \ } \ mono_gc_register_root ((char*)(addr), sizeof (gpointer), reflection_info_desc); \ } while (0) #else #define MOVING_GC_REGISTER(addr) #endif static gboolean is_usertype (MonoReflectionType *ref); static MonoReflectionType *mono_reflection_type_resolve_user_types (MonoReflectionType *type); typedef struct { char *p; char *buf; char *end; } SigBuffer; #define TEXT_OFFSET 512 
#define CLI_H_SIZE 136 #define FILE_ALIGN 512 #define VIRT_ALIGN 8192 #define START_TEXT_RVA 0x00002000 typedef struct { MonoReflectionILGen *ilgen; MonoReflectionType *rtype; MonoArray *parameters; MonoArray *generic_params; MonoGenericContainer *generic_container; MonoArray *pinfo; MonoArray *opt_types; guint32 attrs; guint32 iattrs; guint32 call_conv; guint32 *table_idx; /* note: it's a pointer */ MonoArray *code; MonoObject *type; MonoString *name; MonoBoolean init_locals; MonoBoolean skip_visibility; MonoArray *return_modreq; MonoArray *return_modopt; MonoArray *param_modreq; MonoArray *param_modopt; MonoArray *permissions; MonoMethod *mhandle; guint32 nrefs; gpointer *refs; /* for PInvoke */ int charset, extra_flags, native_cc; MonoString *dll, *dllentry; } ReflectionMethodBuilder; typedef struct { guint32 owner; MonoReflectionGenericParam *gparam; } GenericParamTableEntry; const unsigned char table_sizes [MONO_TABLE_NUM] = { MONO_MODULE_SIZE, MONO_TYPEREF_SIZE, MONO_TYPEDEF_SIZE, 0, MONO_FIELD_SIZE, 0, MONO_METHOD_SIZE, 0, MONO_PARAM_SIZE, MONO_INTERFACEIMPL_SIZE, MONO_MEMBERREF_SIZE, /* 0x0A */ MONO_CONSTANT_SIZE, MONO_CUSTOM_ATTR_SIZE, MONO_FIELD_MARSHAL_SIZE, MONO_DECL_SECURITY_SIZE, MONO_CLASS_LAYOUT_SIZE, MONO_FIELD_LAYOUT_SIZE, /* 0x10 */ MONO_STAND_ALONE_SIGNATURE_SIZE, MONO_EVENT_MAP_SIZE, 0, MONO_EVENT_SIZE, MONO_PROPERTY_MAP_SIZE, 0, MONO_PROPERTY_SIZE, MONO_METHOD_SEMA_SIZE, MONO_METHODIMPL_SIZE, MONO_MODULEREF_SIZE, /* 0x1A */ MONO_TYPESPEC_SIZE, MONO_IMPLMAP_SIZE, MONO_FIELD_RVA_SIZE, 0, 0, MONO_ASSEMBLY_SIZE, /* 0x20 */ MONO_ASSEMBLY_PROCESSOR_SIZE, MONO_ASSEMBLYOS_SIZE, MONO_ASSEMBLYREF_SIZE, MONO_ASSEMBLYREFPROC_SIZE, MONO_ASSEMBLYREFOS_SIZE, MONO_FILE_SIZE, MONO_EXP_TYPE_SIZE, MONO_MANIFEST_SIZE, MONO_NESTED_CLASS_SIZE, MONO_GENERICPARAM_SIZE, /* 0x2A */ MONO_METHODSPEC_SIZE, MONO_GENPARCONSTRAINT_SIZE }; #ifndef DISABLE_REFLECTION_EMIT static guint32 mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod *method, gboolean 
create_typespec); static guint32 mono_image_get_methodbuilder_token (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb, gboolean create_open_instance); static guint32 mono_image_get_ctorbuilder_token (MonoDynamicImage *assembly, MonoReflectionCtorBuilder *cb); static guint32 mono_image_get_sighelper_token (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper); static void ensure_runtime_vtable (MonoClass *klass); static gpointer resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context); static guint32 mono_image_get_methodref_token_for_methodbuilder (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *method); static guint32 encode_generic_method_sig (MonoDynamicImage *assembly, MonoGenericContext *context); static gpointer register_assembly (MonoDomain *domain, MonoReflectionAssembly *res, MonoAssembly *assembly); static void reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb); static void reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb); static guint32 create_generic_typespec (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb); #endif static guint32 mono_image_typedef_or_ref (MonoDynamicImage *assembly, MonoType *type); static guint32 mono_image_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec); static void mono_image_get_generic_param_info (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly); static guint32 encode_marshal_blob (MonoDynamicImage *assembly, MonoReflectionMarshal *minfo); static guint32 encode_constant (MonoDynamicImage *assembly, MonoObject *val, guint32 *ret_type); static char* type_get_qualified_name (MonoType *type, MonoAssembly *ass); static void encode_type (MonoDynamicImage *assembly, MonoType *type, SigBuffer *buf); static void get_default_param_value_blobs (MonoMethod *method, char 
**blobs, guint32 *types); static MonoReflectionType *mono_reflection_type_get_underlying_system_type (MonoReflectionType* t); static MonoType* mono_reflection_get_type_with_rootimage (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve); static MonoReflectionType* mono_reflection_type_resolve_user_types (MonoReflectionType *type); static gboolean is_sre_array (MonoClass *class); static gboolean is_sre_byref (MonoClass *class); static gboolean is_sre_pointer (MonoClass *class); static gboolean is_sre_type_builder (MonoClass *class); static gboolean is_sre_method_builder (MonoClass *class); static gboolean is_sre_ctor_builder (MonoClass *class); static gboolean is_sre_field_builder (MonoClass *class); static gboolean is_sr_mono_method (MonoClass *class); static gboolean is_sr_mono_cmethod (MonoClass *class); static gboolean is_sr_mono_generic_method (MonoClass *class); static gboolean is_sr_mono_generic_cmethod (MonoClass *class); static gboolean is_sr_mono_field (MonoClass *class); static gboolean is_sr_mono_property (MonoClass *class); static gboolean is_sre_method_on_tb_inst (MonoClass *class); static gboolean is_sre_ctor_on_tb_inst (MonoClass *class); static guint32 mono_image_get_methodspec_token (MonoDynamicImage *assembly, MonoMethod *method); static guint32 mono_image_get_inflated_method_token (MonoDynamicImage *assembly, MonoMethod *m); static MonoMethod * inflate_method (MonoReflectionType *type, MonoObject *obj); static guint32 create_typespec (MonoDynamicImage *assembly, MonoType *type); static void init_type_builder_generics (MonoObject *type); #define RESOLVE_TYPE(type) do { type = (void*)mono_reflection_type_resolve_user_types ((MonoReflectionType*)type); } while (0) #define RESOLVE_ARRAY_TYPE_ELEMENT(array, index) do { \ MonoReflectionType *__type = mono_array_get (array, MonoReflectionType*, index); \ __type = mono_reflection_type_resolve_user_types (__type); \ mono_array_set (arr, 
	MonoReflectionType*, index, __type); \
	} while (0)

#define mono_type_array_get_and_resolve(array, index) mono_reflection_type_get_handle ((MonoReflectionType*)mono_array_get (array, gpointer, index))

void
mono_reflection_init (void)
{
}

/* SigBuffer: a growable byte buffer used while assembling metadata
 * signature blobs.  Initialize with SIZE bytes of capacity. */
static void
sigbuffer_init (SigBuffer *buf, int size)
{
	buf->buf = g_malloc (size);
	buf->p = buf->buf;
	buf->end = buf->buf + size;
}

/* Ensure at least SIZE bytes of spare capacity, reallocating (and
 * re-anchoring the write pointer) if needed. */
static void
sigbuffer_make_room (SigBuffer *buf, int size)
{
	if (buf->end - buf->p < size) {
		int new_size = buf->end - buf->buf + size + 32;
		char *p = g_realloc (buf->buf, new_size);
		size = buf->p - buf->buf;
		buf->buf = p;
		buf->p = p + size;
		buf->end = buf->buf + new_size;
	}
}

/* Append VAL using the compressed-integer encoding (at most 6 bytes
 * are reserved; mono_metadata_encode_value advances buf->p). */
static void
sigbuffer_add_value (SigBuffer *buf, guint32 val)
{
	sigbuffer_make_room (buf, 6);
	mono_metadata_encode_value (val, buf->p, &buf->p);
}

/* Append a single raw byte. */
static void
sigbuffer_add_byte (SigBuffer *buf, guint8 val)
{
	sigbuffer_make_room (buf, 1);
	buf->p [0] = val;
	buf->p++;
}

/* Append SIZE raw bytes from P. */
static void
sigbuffer_add_mem (SigBuffer *buf, char *p, guint32 size)
{
	sigbuffer_make_room (buf, size);
	memcpy (buf->p, p, size);
	buf->p += size;
}

/* Release the buffer storage (the SigBuffer struct itself is caller-owned). */
static void
sigbuffer_free (SigBuffer *buf)
{
	g_free (buf->buf);
}

#ifndef DISABLE_REFLECTION_EMIT
/**
 * image_g_malloc:
 *
 * Allocate memory from the @image mempool if it is non-NULL. Otherwise, allocate memory
 * from the C heap.
 */
static gpointer
image_g_malloc (MonoImage *image, guint size)
{
	if (image)
		return mono_image_alloc (image, size);
	else
		return g_malloc (size);
}
#endif /* !DISABLE_REFLECTION_EMIT */

/**
 * image_g_malloc0:
 *
 * Allocate zero-initialized memory from the @image mempool if it is non-NULL.
 * Otherwise, allocate memory from the C heap.
 */
static gpointer
image_g_malloc0 (MonoImage *image, guint size)
{
	if (image)
		return mono_image_alloc0 (image, size);
	else
		return g_malloc0 (size);
}

#ifndef DISABLE_REFLECTION_EMIT
/* strdup into the image mempool when IMAGE is non-NULL, else the C heap. */
static char*
image_strdup (MonoImage *image, const char *s)
{
	if (image)
		return mono_image_strdup (image, s);
	else
		return g_strdup (s);
}
#endif

#define image_g_new(image,struct_type, n_structs) \
    ((struct_type *) image_g_malloc (image, ((gsize) sizeof (struct_type)) * ((gsize) (n_structs))))

#define image_g_new0(image,struct_type, n_structs) \
    ((struct_type *) image_g_malloc0 (image, ((gsize) sizeof (struct_type)) * ((gsize) (n_structs))))

/* Grow TABLE's value array (doubling) so that row NROWS is addressable.
 * NOTE(review): alloc_rows * columns is not overflow-checked -- presumed
 * bounded by metadata table limits; confirm. */
static void
alloc_table (MonoDynamicTable *table, guint nrows)
{
	table->rows = nrows;
	g_assert (table->columns);
	if (nrows + 1 >= table->alloc_rows) {
		while (nrows + 1 >= table->alloc_rows) {
			if (table->alloc_rows == 0)
				table->alloc_rows = 16;
			else
				table->alloc_rows *= 2;
		}

		table->values = g_renew (guint32, table->values, (table->alloc_rows) * table->columns);
	}
}

/* Double STREAM's backing store until it can hold SIZE bytes. */
static void
make_room_in_stream (MonoDynamicStream *stream, int size)
{
	if (size <= stream->alloc_size)
		return;

	while (stream->alloc_size <= size) {
		if (stream->alloc_size < 4096)
			stream->alloc_size = 4096;
		else
			stream->alloc_size *= 2;
	}

	stream->data = g_realloc (stream->data, stream->alloc_size);
}

/* Intern STR (NUL-terminated) in the string heap, returning its offset.
 * Duplicates are shared via sh->hash. */
static guint32
string_heap_insert (MonoDynamicStream *sh, const char *str)
{
	guint32 idx;
	guint32 len;
	gpointer oldkey, oldval;

	if (g_hash_table_lookup_extended (sh->hash, str, &oldkey, &oldval))
		return GPOINTER_TO_UINT (oldval);

	len = strlen (str) + 1;
	idx = sh->index;

	make_room_in_stream (sh, idx + len);

	/*
	 * We strdup the string even if we already copy them in sh->data
	 * so that the string pointers in the hash remain valid even if
	 * we need to realloc sh->data. We may want to avoid that later.
	 */
	g_hash_table_insert (sh->hash, g_strdup (str), GUINT_TO_POINTER (idx));
	memcpy (sh->data + idx, str, len);
	sh->index += len;
	return idx;
}

/* Convert STR to UTF-8 and intern it in the string heap. */
static guint32
string_heap_insert_mstring (MonoDynamicStream *sh, MonoString *str)
{
	char *name = mono_string_to_utf8 (str);
	guint32 idx;
	idx = string_heap_insert (sh, name);
	g_free (name);
	return idx;
}

#ifndef DISABLE_REFLECTION_EMIT
/* Initialize the string heap; offset 0 is always the empty string. */
static void
string_heap_init (MonoDynamicStream *sh)
{
	sh->index = 0;
	sh->alloc_size = 4096;
	sh->data = g_malloc (4096);
	sh->hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
	string_heap_insert (sh, "");
}
#endif

/* Append LEN bytes of DATA to STREAM; returns the starting offset. */
static guint32
mono_image_add_stream_data (MonoDynamicStream *stream, const char *data, guint32 len)
{
	guint32 idx;

	make_room_in_stream (stream, stream->index + len);
	memcpy (stream->data + stream->index, data, len);
	idx = stream->index;
	stream->index += len;
	/*
	 * align index? Not without adding an additional param that controls it since
	 * we may store a blob value in pieces.
	 */
	return idx;
}

/* Append LEN zero bytes to STREAM; returns the starting offset. */
static guint32
mono_image_add_stream_zero (MonoDynamicStream *stream, guint32 len)
{
	guint32 idx;

	make_room_in_stream (stream, stream->index + len);
	memset (stream->data + stream->index, 0, len);
	idx = stream->index;
	stream->index += len;
	return idx;
}

/* Pad STREAM with zero bytes to the next 4-byte boundary. */
static void
stream_data_align (MonoDynamicStream *stream)
{
	char buf [4] = {0};
	guint32 count = stream->index % 4;

	/* we assume the stream data will be aligned */
	if (count)
		mono_image_add_stream_data (stream, buf, 4 - count);
}

#ifndef DISABLE_REFLECTION_EMIT
/* Hash a length-prefixed blob (the length is a compressed integer). */
static int
mono_blob_entry_hash (const char* str)
{
	guint len, h;
	const char *end;

	len = mono_metadata_decode_blob_size (str, &str);
	if (len > 0) {
		end = str + len;
		h = *str;
		for (str += 1; str < end; str++)
			h = (h << 5) - h + *str;
		return h;
	} else {
		return 0;
	}
}

/* Compare two length-prefixed blobs for byte equality. */
static gboolean
mono_blob_entry_equal (const char *str1, const char *str2)
{
	int len, len2;
	const char *end1;
	const char *end2;

	len = mono_metadata_decode_blob_size (str1, &end1);
	len2 = mono_metadata_decode_blob_size (str2, &end2);
	if (len != len2)
		return 0;
	return memcmp (end1, end2, len) == 0;
}
#endif

/* Append the two-part blob B1+B2 to the blob heap, deduplicating via
 * assembly->blob_cache (which owns the concatenated key on insert);
 * returns the blob's heap offset. */
static guint32
add_to_blob_cached (MonoDynamicImage *assembly, char *b1, int s1, char *b2, int s2)
{
	guint32 idx;
	char *copy;
	gpointer oldkey, oldval;

	copy = g_malloc (s1+s2);
	memcpy (copy, b1, s1);
	memcpy (copy + s1, b2, s2);
	if (g_hash_table_lookup_extended (assembly->blob_cache, copy, &oldkey, &oldval)) {
		g_free (copy);
		idx = GPOINTER_TO_UINT (oldval);
	} else {
		idx = mono_image_add_stream_data (&assembly->blob, b1, s1);
		mono_image_add_stream_data (&assembly->blob, b2, s2);
		g_hash_table_insert (assembly->blob_cache, copy, GUINT_TO_POINTER (idx));
	}
	return idx;
}

/* Store BUF's contents as a length-prefixed blob in the blob heap. */
static guint32
sigbuffer_add_to_blob_cached (MonoDynamicImage *assembly, SigBuffer *buf)
{
	char blob_size [8];
	char *b = blob_size;
	guint32 size = buf->p - buf->buf;

	/* store length */
	g_assert (size <= (buf->end - buf->buf));
	mono_metadata_encode_value (size, b, &b);
	return add_to_blob_cached (assembly, blob_size, b-blob_size, buf->buf, size);
}

/*
 * Copy len * nelem bytes from val to dest, swapping bytes to LE if necessary.
 * dest may be misaligned.
 */
static void
swap_with_size (char *dest, const char* val, int len, int nelem)
{
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
	int elem;

	for (elem = 0; elem < nelem; ++elem) {
		switch (len) {
		case 1:
			*dest = *val;
			break;
		case 2:
			dest [0] = val [1];
			dest [1] = val [0];
			break;
		case 4:
			dest [0] = val [3];
			dest [1] = val [2];
			dest [2] = val [1];
			dest [3] = val [0];
			break;
		case 8:
			dest [0] = val [7];
			dest [1] = val [6];
			dest [2] = val [5];
			dest [3] = val [4];
			dest [4] = val [3];
			dest [5] = val [2];
			dest [6] = val [1];
			dest [7] = val [0];
			break;
		default:
			g_assert_not_reached ();
		}
		dest += len;
		val += len;
	}
#else
	memcpy (dest, val, len * nelem);
#endif
}

/* Store STR's UTF-16 characters (little-endian on disk) as a blob and
 * return its heap offset. */
static guint32
add_mono_string_to_blob_cached (MonoDynamicImage *assembly, MonoString *str)
{
	char blob_size [64];
	char *b = blob_size;
	guint32 idx = 0, len;

	len = str->length * 2;
	mono_metadata_encode_value (len, b, &b);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
	{
		char *swapped = g_malloc (2 * mono_string_length (str));
		const char *p = (const char*)mono_string_chars (str);

		swap_with_size (swapped, p, 2, mono_string_length (str));
		idx = add_to_blob_cached (assembly, blob_size, b-blob_size, swapped, len);
		g_free (swapped);
	}
#else
	idx = add_to_blob_cached (assembly, blob_size, b-blob_size, (char*)mono_string_chars (str), len);
#endif
	return idx;
}

#ifndef DISABLE_REFLECTION_EMIT
/* Map a primitive MonoType to the corresponding mono_defaults class. */
static MonoClass *
default_class_from_mono_type (MonoType *type)
{
	switch (type->type) {
	case MONO_TYPE_OBJECT:
		return mono_defaults.object_class;
	case MONO_TYPE_VOID:
		return mono_defaults.void_class;
	case MONO_TYPE_BOOLEAN:
		return mono_defaults.boolean_class;
	case MONO_TYPE_CHAR:
		return mono_defaults.char_class;
	case MONO_TYPE_I1:
		return mono_defaults.sbyte_class;
	case MONO_TYPE_U1:
		return mono_defaults.byte_class;
	case MONO_TYPE_I2:
		return mono_defaults.int16_class;
	case MONO_TYPE_U2:
		return mono_defaults.uint16_class;
	case MONO_TYPE_I4:
		return mono_defaults.int32_class;
	case MONO_TYPE_U4:
		return mono_defaults.uint32_class;
	case MONO_TYPE_I:
		return
		mono_defaults.int_class;
	case MONO_TYPE_U:
		return mono_defaults.uint_class;
	case MONO_TYPE_I8:
		return mono_defaults.int64_class;
	case MONO_TYPE_U8:
		return mono_defaults.uint64_class;
	case MONO_TYPE_R4:
		return mono_defaults.single_class;
	case MONO_TYPE_R8:
		return mono_defaults.double_class;
	case MONO_TYPE_STRING:
		return mono_defaults.string_class;
	default:
		g_warning ("default_class_from_mono_type: implement me 0x%02x\n", type->type);
		g_assert_not_reached ();
	}

	return NULL;
}
#endif

/*
 * mono_class_get_ref_info:
 *
 *   Return the type builder/generic param builder corresponding to KLASS, if it exists.
 */
gpointer
mono_class_get_ref_info (MonoClass *klass)
{
	if (klass->ref_info_handle == 0)
		return NULL;
	else
		return mono_gchandle_get_target (klass->ref_info_handle);
}

/* Attach the reflection object OBJ to KLASS via a new GC handle. */
void
mono_class_set_ref_info (MonoClass *klass, gpointer obj)
{
	klass->ref_info_handle = mono_gchandle_new ((MonoObject*)obj, FALSE);
	g_assert (klass->ref_info_handle != 0);
}

/* Release KLASS's reflection-object GC handle, if any. */
void
mono_class_free_ref_info (MonoClass *klass)
{
	if (klass->ref_info_handle) {
		mono_gchandle_free (klass->ref_info_handle);
		klass->ref_info_handle = 0;
	}
}

/* Emit a GENERICINST signature element for GCLASS into BUF:
 * element type, the generic type definition, then each type argument. */
static void
encode_generic_class (MonoDynamicImage *assembly, MonoGenericClass *gclass, SigBuffer *buf)
{
	int i;
	MonoGenericInst *class_inst;
	MonoClass *klass;

	g_assert (gclass);

	class_inst = gclass->context.class_inst;

	sigbuffer_add_value (buf, MONO_TYPE_GENERICINST);
	klass = gclass->container_class;
	sigbuffer_add_value (buf, klass->byval_arg.type);
	sigbuffer_add_value (buf, mono_image_typedef_or_ref_full (assembly, &klass->byval_arg, FALSE));

	sigbuffer_add_value (buf, class_inst->type_argc);
	for (i = 0; i < class_inst->type_argc; ++i)
		encode_type (assembly, class_inst->type_argv [i], buf);
}

/* Serialize TYPE into BUF using the metadata signature encoding,
 * recursing for element types of pointers, arrays and generics. */
static void
encode_type (MonoDynamicImage *assembly, MonoType *type, SigBuffer *buf)
{
	if (!type) {
		g_assert_not_reached ();
		return;
	}

	if (type->byref)
		sigbuffer_add_value (buf, MONO_TYPE_BYREF);

	switch (type->type){
	case MONO_TYPE_VOID:
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_I1:
	case MONO_TYPE_U1:
	case MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_STRING:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_TYPEDBYREF:
		/* Primitive types are a single element-type byte. */
		sigbuffer_add_value (buf, type->type);
		break;
	case MONO_TYPE_PTR:
		sigbuffer_add_value (buf, type->type);
		encode_type (assembly, type->data.type, buf);
		break;
	case MONO_TYPE_SZARRAY:
		sigbuffer_add_value (buf, type->type);
		encode_type (assembly, &type->data.klass->byval_arg, buf);
		break;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_CLASS: {
		MonoClass *k = mono_class_from_mono_type (type);

		if (k->generic_container) {
			/* An open generic type definition is encoded as an
			 * instantiation with its own generic parameters. */
			MonoGenericClass *gclass = mono_metadata_lookup_generic_class (k, k->generic_container->context.class_inst, TRUE);
			encode_generic_class (assembly, gclass, buf);
		} else {
			/*
			 * Make sure we use the correct type.
			 */
			sigbuffer_add_value (buf, k->byval_arg.type);
			/*
			 * ensure only non-byref gets passed to mono_image_typedef_or_ref(),
			 * otherwise two typerefs could point to the same type, leading to
			 * verification errors.
			 */
			sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, &k->byval_arg));
		}
		break;
	}
	case MONO_TYPE_ARRAY:
		sigbuffer_add_value (buf, type->type);
		encode_type (assembly, &type->data.array->eklass->byval_arg, buf);
		sigbuffer_add_value (buf, type->data.array->rank);
		sigbuffer_add_value (buf, 0); /* FIXME: set to 0 for now */
		sigbuffer_add_value (buf, 0);
		break;
	case MONO_TYPE_GENERICINST:
		encode_generic_class (assembly, type->data.generic_class, buf);
		break;
	case MONO_TYPE_VAR:
	case MONO_TYPE_MVAR:
		sigbuffer_add_value (buf, type->type);
		sigbuffer_add_value (buf, mono_type_get_generic_param_num (type));
		break;
	default:
		g_error ("need to encode type %x", type->type);
	}
}

/* Encode a reflection type, treating NULL as void. */
static void
encode_reflection_type (MonoDynamicImage *assembly, MonoReflectionType *type, SigBuffer *buf)
{
	if (!type) {
		sigbuffer_add_value (buf, MONO_TYPE_VOID);
		return;
	}

	encode_type (assembly, mono_reflection_type_get_handle (type), buf);
}

/* Emit the required (CMOD_REQD) and optional (CMOD_OPT) custom modifier
 * lists into BUF; either array may be NULL. */
static void
encode_custom_modifiers (MonoDynamicImage *assembly, MonoArray *modreq, MonoArray *modopt, SigBuffer *buf)
{
	int i;

	if (modreq) {
		for (i = 0; i < mono_array_length (modreq); ++i) {
			MonoType *mod = mono_type_array_get_and_resolve (modreq, i);
			sigbuffer_add_byte (buf, MONO_TYPE_CMOD_REQD);
			sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, mod));
		}
	}
	if (modopt) {
		for (i = 0; i < mono_array_length (modopt); ++i) {
			MonoType *mod = mono_type_array_get_and_resolve (modopt, i);
			sigbuffer_add_byte (buf, MONO_TYPE_CMOD_OPT);
			sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, mod));
		}
	}
}

#ifndef DISABLE_REFLECTION_EMIT
/* Build the method signature blob for SIG and return its blob-heap
 * offset (0 when the assembly is not being saved). */
static guint32
method_encode_signature (MonoDynamicImage *assembly, MonoMethodSignature *sig)
{
	SigBuffer buf;
	int i;
	guint32 nparams = sig->param_count;
	guint32 idx;

	if (!assembly->save)
		return 0;

	sigbuffer_init (&buf, 32);
	/*
	 * FIXME: vararg, explicit_this, differenc call_conv values...
	 */
	/* Calling-convention byte: base value from call_convention plus flag bits. */
	idx = sig->call_convention;
	if (sig->hasthis)
		idx |= 0x20; /* hasthis */
	if (sig->generic_param_count)
		idx |= 0x10; /* generic */
	sigbuffer_add_byte (&buf, idx);
	if (sig->generic_param_count)
		sigbuffer_add_value (&buf, sig->generic_param_count);
	sigbuffer_add_value (&buf, nparams);
	encode_type (assembly, sig->ret, &buf);
	for (i = 0; i < nparams; ++i) {
		/* The sentinel marks the start of the vararg part of the signature. */
		if (i == sig->sentinelpos)
			sigbuffer_add_byte (&buf, MONO_TYPE_SENTINEL);
		encode_type (assembly, sig->params [i], &buf);
	}
	idx = sigbuffer_add_to_blob_cached (assembly, &buf);
	sigbuffer_free (&buf);
	return idx;
}
#endif

/*
 * method_builder_encode_signature:
 *
 *   Build the signature blob for the method described by the builder MB,
 * including custom modifiers and any optional (vararg) parameter types,
 * and return its index in the blob heap.
 */
static guint32
method_builder_encode_signature (MonoDynamicImage *assembly, ReflectionMethodBuilder *mb)
{
	/*
	 * FIXME: reuse code from method_encode_signature().
	 */
	SigBuffer buf;
	int i;
	guint32 nparams = mb->parameters ? mono_array_length (mb->parameters): 0;
	guint32 ngparams = mb->generic_params ? mono_array_length (mb->generic_params): 0;
	guint32 notypes = mb->opt_types ? mono_array_length (mb->opt_types): 0;
	guint32 idx;

	sigbuffer_init (&buf, 32);
	/* LAMESPEC: all the call conv spec is foobared */
	idx = mb->call_conv & 0x60; /* has-this, explicit-this */
	if (mb->call_conv & 2)
		idx |= 0x5; /* vararg */
	if (!(mb->attrs & METHOD_ATTRIBUTE_STATIC))
		idx |= 0x20; /* hasthis */
	if (ngparams)
		idx |= 0x10; /* generic */
	sigbuffer_add_byte (&buf, idx);
	if (ngparams)
		sigbuffer_add_value (&buf, ngparams);
	sigbuffer_add_value (&buf, nparams + notypes);
	encode_custom_modifiers (assembly, mb->return_modreq, mb->return_modopt, &buf);
	encode_reflection_type (assembly, mb->rtype, &buf);
	for (i = 0; i < nparams; ++i) {
		MonoArray *modreq = NULL;
		MonoArray *modopt = NULL;
		MonoReflectionType *pt;

		/* The per-parameter modifier arrays are optional and may be shorter. */
		if (mb->param_modreq && (i < mono_array_length (mb->param_modreq)))
			modreq = mono_array_get (mb->param_modreq, MonoArray*, i);
		if (mb->param_modopt && (i < mono_array_length (mb->param_modopt)))
			modopt = mono_array_get (mb->param_modopt, MonoArray*, i);
		encode_custom_modifiers (assembly, modreq, modopt, &buf);
		pt =
mono_array_get (mb->parameters, MonoReflectionType*, i);
		encode_reflection_type (assembly, pt, &buf);
	}
	if (notypes)
		/* Sentinel separates the fixed parameters from the extra vararg types. */
		sigbuffer_add_byte (&buf, MONO_TYPE_SENTINEL);
	for (i = 0; i < notypes; ++i) {
		MonoReflectionType *pt;

		pt = mono_array_get (mb->opt_types, MonoReflectionType*, i);
		encode_reflection_type (assembly, pt, &buf);
	}
	idx = sigbuffer_add_to_blob_cached (assembly, &buf);
	sigbuffer_free (&buf);
	return idx;
}

/*
 * encode_locals:
 *
 *   Encode the local variable signature of ILGEN, add it to the blob heap
 * and to the STANDALONESIG table (reusing an existing row when the same
 * signature was seen before), and return the table row index.
 */
static guint32
encode_locals (MonoDynamicImage *assembly, MonoReflectionILGen *ilgen)
{
	MonoDynamicTable *table;
	guint32 *values;
	guint32 idx, sig_idx;
	guint nl = mono_array_length (ilgen->locals);
	SigBuffer buf;
	int i;

	sigbuffer_init (&buf, 32);
	sigbuffer_add_value (&buf, 0x07); /* 0x07: LOCAL_SIG header byte */
	sigbuffer_add_value (&buf, nl);
	for (i = 0; i < nl; ++i) {
		MonoReflectionLocalBuilder *lb = mono_array_get (ilgen->locals, MonoReflectionLocalBuilder*, i);

		if (lb->is_pinned)
			sigbuffer_add_value (&buf, MONO_TYPE_PINNED);

		encode_reflection_type (assembly, (MonoReflectionType*)lb->type, &buf);
	}
	sig_idx = sigbuffer_add_to_blob_cached (assembly, &buf);
	sigbuffer_free (&buf);

	/* Cache maps blob index -> STANDALONESIG row so identical sigs share a row. */
	if (assembly->standalonesig_cache == NULL)
		assembly->standalonesig_cache = g_hash_table_new (NULL, NULL);
	idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->standalonesig_cache, GUINT_TO_POINTER (sig_idx)));
	if (idx)
		return idx;

	table = &assembly->tables [MONO_TABLE_STANDALONESIG];
	idx = table->next_idx ++;
	table->rows ++;
	alloc_table (table, table->rows);
	values = table->values + idx * MONO_STAND_ALONE_SIGNATURE_SIZE;

	values [MONO_STAND_ALONE_SIGNATURE] = sig_idx;

	g_hash_table_insert (assembly->standalonesig_cache, GUINT_TO_POINTER (sig_idx), GUINT_TO_POINTER (idx));

	return idx;
}

/*
 * method_count_clauses:
 *
 *   Count the exception handling clauses defined on ILGEN.  An exception
 * info entry with no handler array still counts as one clause.
 */
static guint32
method_count_clauses (MonoReflectionILGen *ilgen)
{
	guint32 num_clauses = 0;
	int i;
	MonoILExceptionInfo *ex_info;
	for (i = 0; i < mono_array_length (ilgen->ex_handlers); ++i) {
		ex_info = (MonoILExceptionInfo*)mono_array_addr (ilgen->ex_handlers, MonoILExceptionInfo, i);
		if (ex_info->handlers)
			num_clauses +=
mono_array_length (ex_info->handlers); else num_clauses++; } return num_clauses; } #ifndef DISABLE_REFLECTION_EMIT static MonoExceptionClause* method_encode_clauses (MonoImage *image, MonoDynamicImage *assembly, MonoReflectionILGen *ilgen, guint32 num_clauses) { MonoExceptionClause *clauses; MonoExceptionClause *clause; MonoILExceptionInfo *ex_info; MonoILExceptionBlock *ex_block; guint32 finally_start; int i, j, clause_index;; clauses = image_g_new0 (image, MonoExceptionClause, num_clauses); clause_index = 0; for (i = mono_array_length (ilgen->ex_handlers) - 1; i >= 0; --i) { ex_info = (MonoILExceptionInfo*)mono_array_addr (ilgen->ex_handlers, MonoILExceptionInfo, i); finally_start = ex_info->start + ex_info->len; if (!ex_info->handlers) continue; for (j = 0; j < mono_array_length (ex_info->handlers); ++j) { ex_block = (MonoILExceptionBlock*)mono_array_addr (ex_info->handlers, MonoILExceptionBlock, j); clause = &(clauses [clause_index]); clause->flags = ex_block->type; clause->try_offset = ex_info->start; if (ex_block->type == MONO_EXCEPTION_CLAUSE_FINALLY) clause->try_len = finally_start - ex_info->start; else clause->try_len = ex_info->len; clause->handler_offset = ex_block->start; clause->handler_len = ex_block->len; if (ex_block->extype) { clause->data.catch_class = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)ex_block->extype)); } else { if (ex_block->type == MONO_EXCEPTION_CLAUSE_FILTER) clause->data.filter_offset = ex_block->filter_offset; else clause->data.filter_offset = 0; } finally_start = ex_block->start + ex_block->len; clause_index ++; } } return clauses; } #endif /* !DISABLE_REFLECTION_EMIT */ static guint32 method_encode_code (MonoDynamicImage *assembly, ReflectionMethodBuilder *mb) { char flags = 0; guint32 idx; guint32 code_size; gint32 max_stack, i; gint32 num_locals = 0; gint32 num_exception = 0; gint maybe_small; guint32 fat_flags; char fat_header [12]; guint32 int_value; guint16 short_value; guint32 
local_sig = 0; guint32 header_size = 12; MonoArray *code; if ((mb->attrs & (METHOD_ATTRIBUTE_PINVOKE_IMPL | METHOD_ATTRIBUTE_ABSTRACT)) || (mb->iattrs & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME))) return 0; /*if (mb->name) g_print ("Encode method %s\n", mono_string_to_utf8 (mb->name));*/ if (mb->ilgen) { code = mb->ilgen->code; code_size = mb->ilgen->code_len; max_stack = mb->ilgen->max_stack; num_locals = mb->ilgen->locals ? mono_array_length (mb->ilgen->locals) : 0; if (mb->ilgen->ex_handlers) num_exception = method_count_clauses (mb->ilgen); } else { code = mb->code; if (code == NULL){ char *name = mono_string_to_utf8 (mb->name); char *str = g_strdup_printf ("Method %s does not have any IL associated", name); MonoException *exception = mono_get_exception_argument (NULL, "a method does not have any IL associated"); g_free (str); g_free (name); mono_raise_exception (exception); } code_size = mono_array_length (code); max_stack = 8; /* we probably need to run a verifier on the code... */ } stream_data_align (&assembly->code); /* check for exceptions, maxstack, locals */ maybe_small = (max_stack <= 8) && (!num_locals) && (!num_exception); if (maybe_small) { if (code_size < 64 && !(code_size & 1)) { flags = (code_size << 2) | 0x2; } else if (code_size < 32 && (code_size & 1)) { flags = (code_size << 2) | 0x6; /* LAMESPEC: see metadata.c */ } else { goto fat_header; } idx = mono_image_add_stream_data (&assembly->code, &flags, 1); /* add to the fixup todo list */ if (mb->ilgen && mb->ilgen->num_token_fixups) mono_g_hash_table_insert (assembly->token_fixups, mb->ilgen, GUINT_TO_POINTER (idx + 1)); mono_image_add_stream_data (&assembly->code, mono_array_addr (code, char, 0), code_size); return assembly->text_rva + idx; } fat_header: if (num_locals) local_sig = MONO_TOKEN_SIGNATURE | encode_locals (assembly, mb->ilgen); /* * FIXME: need to set also the header size in fat_flags. 
* (and more sects and init locals flags) */ fat_flags = 0x03; if (num_exception) fat_flags |= METHOD_HEADER_MORE_SECTS; if (mb->init_locals) fat_flags |= METHOD_HEADER_INIT_LOCALS; fat_header [0] = fat_flags; fat_header [1] = (header_size / 4 ) << 4; short_value = GUINT16_TO_LE (max_stack); memcpy (fat_header + 2, &short_value, 2); int_value = GUINT32_TO_LE (code_size); memcpy (fat_header + 4, &int_value, 4); int_value = GUINT32_TO_LE (local_sig); memcpy (fat_header + 8, &int_value, 4); idx = mono_image_add_stream_data (&assembly->code, fat_header, 12); /* add to the fixup todo list */ if (mb->ilgen && mb->ilgen->num_token_fixups) mono_g_hash_table_insert (assembly->token_fixups, mb->ilgen, GUINT_TO_POINTER (idx + 12)); mono_image_add_stream_data (&assembly->code, mono_array_addr (code, char, 0), code_size); if (num_exception) { unsigned char sheader [4]; MonoILExceptionInfo * ex_info; MonoILExceptionBlock * ex_block; int j; stream_data_align (&assembly->code); /* always use fat format for now */ sheader [0] = METHOD_HEADER_SECTION_FAT_FORMAT | METHOD_HEADER_SECTION_EHTABLE; num_exception *= 6 * sizeof (guint32); num_exception += 4; /* include the size of the header */ sheader [1] = num_exception & 0xff; sheader [2] = (num_exception >> 8) & 0xff; sheader [3] = (num_exception >> 16) & 0xff; mono_image_add_stream_data (&assembly->code, (char*)sheader, 4); /* fat header, so we are already aligned */ /* reverse order */ for (i = mono_array_length (mb->ilgen->ex_handlers) - 1; i >= 0; --i) { ex_info = (MonoILExceptionInfo *)mono_array_addr (mb->ilgen->ex_handlers, MonoILExceptionInfo, i); if (ex_info->handlers) { int finally_start = ex_info->start + ex_info->len; for (j = 0; j < mono_array_length (ex_info->handlers); ++j) { guint32 val; ex_block = (MonoILExceptionBlock*)mono_array_addr (ex_info->handlers, MonoILExceptionBlock, j); /* the flags */ val = GUINT32_TO_LE (ex_block->type); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /* try 
offset */ val = GUINT32_TO_LE (ex_info->start); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /* need fault, too, probably */ if (ex_block->type == MONO_EXCEPTION_CLAUSE_FINALLY) val = GUINT32_TO_LE (finally_start - ex_info->start); else val = GUINT32_TO_LE (ex_info->len); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /* handler offset */ val = GUINT32_TO_LE (ex_block->start); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /* handler len */ val = GUINT32_TO_LE (ex_block->len); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); finally_start = ex_block->start + ex_block->len; if (ex_block->extype) { val = mono_metadata_token_from_dor (mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)ex_block->extype))); } else { if (ex_block->type == MONO_EXCEPTION_CLAUSE_FILTER) val = ex_block->filter_offset; else val = 0; } val = GUINT32_TO_LE (val); mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32)); /*g_print ("out clause %d: from %d len=%d, handler at %d, %d, finally_start=%d, ex_info->start=%d, ex_info->len=%d, ex_block->type=%d, j=%d, i=%d\n", clause.flags, clause.try_offset, clause.try_len, clause.handler_offset, clause.handler_len, finally_start, ex_info->start, ex_info->len, ex_block->type, j, i);*/ } } else { g_error ("No clauses for ex info block %d", i); } } } return assembly->text_rva + idx; } static guint32 find_index_in_table (MonoDynamicImage *assembly, int table_idx, int col, guint32 token) { int i; MonoDynamicTable *table; guint32 *values; table = &assembly->tables [table_idx]; g_assert (col < table->columns); values = table->values + table->columns; for (i = 1; i <= table->rows; ++i) { if (values [col] == token) return i; values += table->columns; } return 0; } /* * LOCKING: Acquires the loader lock. 
*/ static MonoCustomAttrInfo* lookup_custom_attr (MonoImage *image, gpointer member) { MonoCustomAttrInfo* res; res = mono_image_property_lookup (image, member, MONO_PROP_DYNAMIC_CATTR); if (!res) return NULL; return g_memdup (res, MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * res->num_attrs); } static gboolean custom_attr_visible (MonoImage *image, MonoReflectionCustomAttr *cattr) { /* FIXME: Need to do more checks */ if (cattr->ctor->method && (cattr->ctor->method->klass->image != image)) { int visibility = cattr->ctor->method->klass->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK; if ((visibility != TYPE_ATTRIBUTE_PUBLIC) && (visibility != TYPE_ATTRIBUTE_NESTED_PUBLIC)) return FALSE; } return TRUE; } static MonoCustomAttrInfo* mono_custom_attrs_from_builders (MonoImage *alloc_img, MonoImage *image, MonoArray *cattrs) { int i, index, count, not_visible; MonoCustomAttrInfo *ainfo; MonoReflectionCustomAttr *cattr; if (!cattrs) return NULL; /* FIXME: check in assembly the Run flag is set */ count = mono_array_length (cattrs); /* Skip nonpublic attributes since MS.NET seems to do the same */ /* FIXME: This needs to be done more globally */ not_visible = 0; for (i = 0; i < count; ++i) { cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i); if (!custom_attr_visible (image, cattr)) not_visible ++; } count -= not_visible; ainfo = image_g_malloc0 (alloc_img, MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * count); ainfo->image = image; ainfo->num_attrs = count; ainfo->cached = alloc_img != NULL; index = 0; for (i = 0; i < count; ++i) { cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i); if (custom_attr_visible (image, cattr)) { unsigned char *saved = mono_image_alloc (image, mono_array_length (cattr->data)); memcpy (saved, mono_array_addr (cattr->data, char, 0), mono_array_length (cattr->data)); ainfo->attrs [index].ctor = cattr->ctor->method; ainfo->attrs [index].data = saved; ainfo->attrs [index].data_size = 
mono_array_length (cattr->data);
			index ++;
		}
	}

	return ainfo;
}

#ifndef DISABLE_REFLECTION_EMIT
/*
 * mono_save_custom_attrs:
 *
 *   Attach the custom attributes CATTRS to OBJ in the dynamic property
 * hash of IMAGE, freeing any previously saved set.
 *
 * LOCKING: Acquires the loader lock.
 */
static void
mono_save_custom_attrs (MonoImage *image, void *obj, MonoArray *cattrs)
{
	MonoCustomAttrInfo *ainfo, *tmp;

	if (!cattrs || !mono_array_length (cattrs))
		return;

	ainfo = mono_custom_attrs_from_builders (image, image, cattrs);

	mono_loader_lock ();
	tmp = mono_image_property_lookup (image, obj, MONO_PROP_DYNAMIC_CATTR);
	if (tmp)
		mono_custom_attrs_free (tmp);
	mono_image_property_insert (image, obj, MONO_PROP_DYNAMIC_CATTR, ainfo);
	mono_loader_unlock ();
}
#endif

/* Free AINFO unless it is cached (i.e. owned by an image). */
void
mono_custom_attrs_free (MonoCustomAttrInfo *ainfo)
{
	if (!ainfo->cached)
		g_free (ainfo);
}

/*
 * mono_image_add_cattrs:
 *
 *   Add one CUSTOMATTRIBUTE row per attribute in CATTRS.
 * idx is the table index of the object
 * type is one of MONO_CUSTOM_ATTR_*
 */
static void
mono_image_add_cattrs (MonoDynamicImage *assembly, guint32 idx, guint32 type, MonoArray *cattrs)
{
	MonoDynamicTable *table;
	MonoReflectionCustomAttr *cattr;
	guint32 *values;
	guint32 count, i, token;
	char blob_size [6];
	char *p = blob_size;

	/* it is legal to pass a NULL cattrs: we avoid to use the if in a lot of places */
	if (!cattrs)
		return;
	count = mono_array_length (cattrs);
	table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];
	table->rows += count;
	alloc_table (table, table->rows);
	values = table->values + table->next_idx * MONO_CUSTOM_ATTR_SIZE;
	/* The coded parent index is the same for every row we add here. */
	idx <<= MONO_CUSTOM_ATTR_BITS;
	idx |= type;
	for (i = 0; i < count; ++i) {
		cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i);
		values [MONO_CUSTOM_ATTR_PARENT] = idx;
		/* Encode the attribute constructor as a CustomAttributeType coded index. */
		token = mono_image_create_token (assembly, (MonoObject*)cattr->ctor, FALSE, FALSE);
		type = mono_metadata_token_index (token);
		type <<= MONO_CUSTOM_ATTR_TYPE_BITS;
		switch (mono_metadata_token_table (token)) {
		case MONO_TABLE_METHOD:
			type |= MONO_CUSTOM_ATTR_TYPE_METHODDEF;
			break;
		case MONO_TABLE_MEMBERREF:
			type |= MONO_CUSTOM_ATTR_TYPE_MEMBERREF;
			break;
		default:
			g_warning ("got wrong token in custom attr");
			continue;
		}
		values [MONO_CUSTOM_ATTR_TYPE] =
type;
		p = blob_size;
		/* Length-prefix the attribute blob, then store it in the blob heap. */
		mono_metadata_encode_value (mono_array_length (cattr->data), p, &p);
		values [MONO_CUSTOM_ATTR_VALUE] = add_to_blob_cached (assembly, blob_size, p - blob_size,
			mono_array_addr (cattr->data, char, 0), mono_array_length (cattr->data));
		values += MONO_CUSTOM_ATTR_SIZE;
		++table->next_idx;
	}
}

/*
 * mono_image_add_decl_security:
 *
 *   Add one DECLSECURITY row per permission set in PERMISSIONS, parented
 * to PARENT_TOKEN (a typedef, methoddef or assembly token).
 */
static void
mono_image_add_decl_security (MonoDynamicImage *assembly, guint32 parent_token, MonoArray *permissions)
{
	MonoDynamicTable *table;
	guint32 *values;
	guint32 count, i, idx;
	MonoReflectionPermissionSet *perm;

	if (!permissions)
		return;

	count = mono_array_length (permissions);
	table = &assembly->tables [MONO_TABLE_DECLSECURITY];
	table->rows += count;
	alloc_table (table, table->rows);

	for (i = 0; i < mono_array_length (permissions); ++i) {
		perm = (MonoReflectionPermissionSet*)mono_array_addr (permissions, MonoReflectionPermissionSet, i);

		values = table->values + table->next_idx * MONO_DECL_SECURITY_SIZE;

		/* Build the HasDeclSecurity coded index from the parent token. */
		idx = mono_metadata_token_index (parent_token);
		idx <<= MONO_HAS_DECL_SECURITY_BITS;
		switch (mono_metadata_token_table (parent_token)) {
		case MONO_TABLE_TYPEDEF:
			idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
			break;
		case MONO_TABLE_METHOD:
			idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
			break;
		case MONO_TABLE_ASSEMBLY:
			idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY;
			break;
		default:
			g_assert_not_reached ();
		}

		values [MONO_DECL_SECURITY_ACTION] = perm->action;
		values [MONO_DECL_SECURITY_PARENT] = idx;
		values [MONO_DECL_SECURITY_PERMISSIONSET] = add_mono_string_to_blob_cached (assembly, perm->pset);

		++table->next_idx;
	}
}

/*
 * Fill in the MethodDef and ParamDef tables for a method.
 * This is used for both normal methods and constructors.
*/ static void mono_image_basic_method (ReflectionMethodBuilder *mb, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; guint i, count; /* room in this table is already allocated */ table = &assembly->tables [MONO_TABLE_METHOD]; *mb->table_idx = table->next_idx ++; g_hash_table_insert (assembly->method_to_table_idx, mb->mhandle, GUINT_TO_POINTER ((*mb->table_idx))); values = table->values + *mb->table_idx * MONO_METHOD_SIZE; values [MONO_METHOD_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->name); values [MONO_METHOD_FLAGS] = mb->attrs; values [MONO_METHOD_IMPLFLAGS] = mb->iattrs; values [MONO_METHOD_SIGNATURE] = method_builder_encode_signature (assembly, mb); values [MONO_METHOD_RVA] = method_encode_code (assembly, mb); table = &assembly->tables [MONO_TABLE_PARAM]; values [MONO_METHOD_PARAMLIST] = table->next_idx; mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_METHOD, *mb->table_idx), mb->permissions); if (mb->pinfo) { MonoDynamicTable *mtable; guint32 *mvalues; mtable = &assembly->tables [MONO_TABLE_FIELDMARSHAL]; mvalues = mtable->values + mtable->next_idx * MONO_FIELD_MARSHAL_SIZE; count = 0; for (i = 0; i < mono_array_length (mb->pinfo); ++i) { if (mono_array_get (mb->pinfo, gpointer, i)) count++; } table->rows += count; alloc_table (table, table->rows); values = table->values + table->next_idx * MONO_PARAM_SIZE; for (i = 0; i < mono_array_length (mb->pinfo); ++i) { MonoReflectionParamBuilder *pb; if ((pb = mono_array_get (mb->pinfo, MonoReflectionParamBuilder*, i))) { values [MONO_PARAM_FLAGS] = pb->attrs; values [MONO_PARAM_SEQUENCE] = i; if (pb->name != NULL) { values [MONO_PARAM_NAME] = string_heap_insert_mstring (&assembly->sheap, pb->name); } else { values [MONO_PARAM_NAME] = 0; } values += MONO_PARAM_SIZE; if (pb->marshal_info) { mtable->rows++; alloc_table (mtable, mtable->rows); mvalues = mtable->values + mtable->rows * MONO_FIELD_MARSHAL_SIZE; mvalues [MONO_FIELD_MARSHAL_PARENT] = 
(table->next_idx << MONO_HAS_FIELD_MARSHAL_BITS) | MONO_HAS_FIELD_MARSHAL_PARAMDEF; mvalues [MONO_FIELD_MARSHAL_NATIVE_TYPE] = encode_marshal_blob (assembly, pb->marshal_info); } pb->table_idx = table->next_idx++; if (pb->attrs & PARAM_ATTRIBUTE_HAS_DEFAULT) { guint32 field_type = 0; mtable = &assembly->tables [MONO_TABLE_CONSTANT]; mtable->rows ++; alloc_table (mtable, mtable->rows); mvalues = mtable->values + mtable->rows * MONO_CONSTANT_SIZE; mvalues [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_PARAM | (pb->table_idx << MONO_HASCONSTANT_BITS); mvalues [MONO_CONSTANT_VALUE] = encode_constant (assembly, pb->def_value, &field_type); mvalues [MONO_CONSTANT_TYPE] = field_type; mvalues [MONO_CONSTANT_PADDING] = 0; } } } } } #ifndef DISABLE_REFLECTION_EMIT static void reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb) { memset (rmb, 0, sizeof (ReflectionMethodBuilder)); rmb->ilgen = mb->ilgen; rmb->rtype = mono_reflection_type_resolve_user_types ((MonoReflectionType*)mb->rtype); rmb->parameters = mb->parameters; rmb->generic_params = mb->generic_params; rmb->generic_container = mb->generic_container; rmb->opt_types = NULL; rmb->pinfo = mb->pinfo; rmb->attrs = mb->attrs; rmb->iattrs = mb->iattrs; rmb->call_conv = mb->call_conv; rmb->code = mb->code; rmb->type = mb->type; rmb->name = mb->name; rmb->table_idx = &mb->table_idx; rmb->init_locals = mb->init_locals; rmb->skip_visibility = FALSE; rmb->return_modreq = mb->return_modreq; rmb->return_modopt = mb->return_modopt; rmb->param_modreq = mb->param_modreq; rmb->param_modopt = mb->param_modopt; rmb->permissions = mb->permissions; rmb->mhandle = mb->mhandle; rmb->nrefs = 0; rmb->refs = NULL; if (mb->dll) { rmb->charset = mb->charset; rmb->extra_flags = mb->extra_flags; rmb->native_cc = mb->native_cc; rmb->dllentry = mb->dllentry; rmb->dll = mb->dll; } } static void reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb) 
{ const char *name = mb->attrs & METHOD_ATTRIBUTE_STATIC ? ".cctor": ".ctor"; memset (rmb, 0, sizeof (ReflectionMethodBuilder)); rmb->ilgen = mb->ilgen; rmb->rtype = mono_type_get_object (mono_domain_get (), &mono_defaults.void_class->byval_arg); rmb->parameters = mb->parameters; rmb->generic_params = NULL; rmb->generic_container = NULL; rmb->opt_types = NULL; rmb->pinfo = mb->pinfo; rmb->attrs = mb->attrs; rmb->iattrs = mb->iattrs; rmb->call_conv = mb->call_conv; rmb->code = NULL; rmb->type = mb->type; rmb->name = mono_string_new (mono_domain_get (), name); rmb->table_idx = &mb->table_idx; rmb->init_locals = mb->init_locals; rmb->skip_visibility = FALSE; rmb->return_modreq = NULL; rmb->return_modopt = NULL; rmb->param_modreq = mb->param_modreq; rmb->param_modopt = mb->param_modopt; rmb->permissions = mb->permissions; rmb->mhandle = mb->mhandle; rmb->nrefs = 0; rmb->refs = NULL; } static void reflection_methodbuilder_from_dynamic_method (ReflectionMethodBuilder *rmb, MonoReflectionDynamicMethod *mb) { memset (rmb, 0, sizeof (ReflectionMethodBuilder)); rmb->ilgen = mb->ilgen; rmb->rtype = mb->rtype; rmb->parameters = mb->parameters; rmb->generic_params = NULL; rmb->generic_container = NULL; rmb->opt_types = NULL; rmb->pinfo = NULL; rmb->attrs = mb->attrs; rmb->iattrs = 0; rmb->call_conv = mb->call_conv; rmb->code = NULL; rmb->type = (MonoObject *) mb->owner; rmb->name = mb->name; rmb->table_idx = NULL; rmb->init_locals = mb->init_locals; rmb->skip_visibility = mb->skip_visibility; rmb->return_modreq = NULL; rmb->return_modopt = NULL; rmb->param_modreq = NULL; rmb->param_modopt = NULL; rmb->permissions = NULL; rmb->mhandle = mb->mhandle; rmb->nrefs = 0; rmb->refs = NULL; } #endif static void mono_image_add_methodimpl (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)mb->type; MonoDynamicTable *table; guint32 *values; guint32 tok; if (!mb->override_method) return; table = &assembly->tables 
[MONO_TABLE_METHODIMPL];
	table->rows ++;
	alloc_table (table, table->rows);
	values = table->values + table->rows * MONO_METHODIMPL_SIZE;
	values [MONO_METHODIMPL_CLASS] = tb->table_idx;
	values [MONO_METHODIMPL_BODY] = MONO_METHODDEFORREF_METHODDEF | (mb->table_idx << MONO_METHODDEFORREF_BITS);

	/* Encode the overridden method as a MethodDefOrRef coded index. */
	tok = mono_image_create_token (assembly, (MonoObject*)mb->override_method, FALSE, FALSE);
	switch (mono_metadata_token_table (tok)) {
	case MONO_TABLE_MEMBERREF:
		tok = (mono_metadata_token_index (tok) << MONO_METHODDEFORREF_BITS ) | MONO_METHODDEFORREF_METHODREF;
		break;
	case MONO_TABLE_METHOD:
		tok = (mono_metadata_token_index (tok) << MONO_METHODDEFORREF_BITS ) | MONO_METHODDEFORREF_METHODDEF;
		break;
	default:
		g_assert_not_reached ();
	}
	values [MONO_METHODIMPL_DECLARATION] = tok;
}

#ifndef DISABLE_REFLECTION_EMIT
/*
 * mono_image_get_method_info:
 *
 *   Emit the metadata rows for the method builder MB: the basic
 * MethodDef/Param rows, IMPLMAP/MODULEREF rows for P/Invoke methods,
 * and GENERICPARAM rows for generic methods.
 */
static void
mono_image_get_method_info (MonoReflectionMethodBuilder *mb, MonoDynamicImage *assembly)
{
	MonoDynamicTable *table;
	guint32 *values;
	ReflectionMethodBuilder rmb;
	int i;

	reflection_methodbuilder_from_method_builder (&rmb, mb);

	mono_image_basic_method (&rmb, assembly);
	mb->table_idx = *rmb.table_idx;

	if (mb->dll) { /* It's a P/Invoke method */
		guint32 moduleref;
		/* map CharSet values to on-disk values */
		int ncharset = (mb->charset ?
(mb->charset - 1) * 2 : 0);
		int extra_flags = mb->extra_flags;
		table = &assembly->tables [MONO_TABLE_IMPLMAP];
		table->rows ++;
		alloc_table (table, table->rows);
		values = table->values + table->rows * MONO_IMPLMAP_SIZE;
		values [MONO_IMPLMAP_FLAGS] = (mb->native_cc << 8) | ncharset | extra_flags;
		values [MONO_IMPLMAP_MEMBER] = (mb->table_idx << 1) | 1; /* memberforwarded: method */
		/* Entry-point name defaults to the managed method name. */
		if (mb->dllentry)
			values [MONO_IMPLMAP_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->dllentry);
		else
			values [MONO_IMPLMAP_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->name);
		moduleref = string_heap_insert_mstring (&assembly->sheap, mb->dll);
		/* Reuse an existing MODULEREF row for this dll, adding one if needed. */
		if (!(values [MONO_IMPLMAP_SCOPE] = find_index_in_table (assembly, MONO_TABLE_MODULEREF, MONO_MODULEREF_NAME, moduleref))) {
			table = &assembly->tables [MONO_TABLE_MODULEREF];
			table->rows ++;
			alloc_table (table, table->rows);
			table->values [table->rows * MONO_MODULEREF_SIZE + MONO_MODULEREF_NAME] = moduleref;
			values [MONO_IMPLMAP_SCOPE] = table->rows;
		}
	}

	if (mb->generic_params) {
		table = &assembly->tables [MONO_TABLE_GENERICPARAM];
		table->rows += mono_array_length (mb->generic_params);
		alloc_table (table, table->rows);
		for (i = 0; i < mono_array_length (mb->generic_params); ++i) {
			guint32 owner = MONO_TYPEORMETHOD_METHOD | (mb->table_idx << MONO_TYPEORMETHOD_BITS);

			mono_image_get_generic_param_info (
				mono_array_get (mb->generic_params, gpointer, i), owner, assembly);
		}
	}
}

/* Emit the MethodDef/Param rows for the constructor builder MB. */
static void
mono_image_get_ctor_info (MonoDomain *domain, MonoReflectionCtorBuilder *mb, MonoDynamicImage *assembly)
{
	ReflectionMethodBuilder rmb;

	reflection_methodbuilder_from_ctor_builder (&rmb, mb);

	mono_image_basic_method (&rmb, assembly);
	mb->table_idx = *rmb.table_idx;
}
#endif

/* Return the assembly-qualified name of TYPE. */
static char*
type_get_fully_qualified_name (MonoType *type)
{
	return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED);
}

/*
 * type_get_qualified_name:
 *
 *   Return a name for TYPE; the name is assembly-qualified only when the
 * type lives outside ASS and outside any dynamic assembly.
 */
static char*
type_get_qualified_name (MonoType *type, MonoAssembly *ass)
{
	MonoClass *klass;
	MonoAssembly *ta;

	klass =
mono_class_from_mono_type (type); if (!klass) return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_REFLECTION); ta = klass->image->assembly; if (ta->dynamic || (ta == ass)) { if (klass->generic_class || klass->generic_container) /* For generic type definitions, we want T, while REFLECTION returns T<K> */ return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_FULL_NAME); else return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_REFLECTION); } return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED); } #ifndef DISABLE_REFLECTION_EMIT /*field_image is the image to which the eventual custom mods have been encoded against*/ static guint32 fieldref_encode_signature (MonoDynamicImage *assembly, MonoImage *field_image, MonoType *type) { SigBuffer buf; guint32 idx, i, token; if (!assembly->save) return 0; sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0x06); /* encode custom attributes before the type */ if (type->num_mods) { for (i = 0; i < type->num_mods; ++i) { if (field_image) { MonoClass *class = mono_class_get (field_image, type->modifiers [i].token); g_assert (class); token = mono_image_typedef_or_ref (assembly, &class->byval_arg); } else { token = type->modifiers [i].token; } if (type->modifiers [i].required) sigbuffer_add_byte (&buf, MONO_TYPE_CMOD_REQD); else sigbuffer_add_byte (&buf, MONO_TYPE_CMOD_OPT); sigbuffer_add_value (&buf, token); } } encode_type (assembly, type, &buf); idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } #endif static guint32 field_encode_signature (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb) { SigBuffer buf; guint32 idx; guint32 typespec = 0; MonoType *type; MonoClass *class; init_type_builder_generics (fb->type); type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type); class = mono_class_from_mono_type (type); sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0x06); encode_custom_modifiers (assembly, fb->modreq, 
fb->modopt, &buf); /* encode custom attributes before the type */ if (class->generic_container) typespec = create_typespec (assembly, type); if (typespec) { MonoGenericClass *gclass; gclass = mono_metadata_lookup_generic_class (class, class->generic_container->context.class_inst, TRUE); encode_generic_class (assembly, gclass, &buf); } else { encode_type (assembly, type, &buf); } idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; } static guint32 encode_constant (MonoDynamicImage *assembly, MonoObject *val, guint32 *ret_type) { char blob_size [64]; char *b = blob_size; char *p, *box_val; char* buf; guint32 idx = 0, len = 0, dummy = 0; #ifdef ARM_FPU_FPA #if G_BYTE_ORDER == G_LITTLE_ENDIAN guint32 fpa_double [2]; guint32 *fpa_p; #endif #endif p = buf = g_malloc (64); if (!val) { *ret_type = MONO_TYPE_CLASS; len = 4; box_val = (char*)&dummy; } else { box_val = ((char*)val) + sizeof (MonoObject); *ret_type = val->vtable->klass->byval_arg.type; } handle_enum: switch (*ret_type) { case MONO_TYPE_BOOLEAN: case MONO_TYPE_U1: case MONO_TYPE_I1: len = 1; break; case MONO_TYPE_CHAR: case MONO_TYPE_U2: case MONO_TYPE_I2: len = 2; break; case MONO_TYPE_U4: case MONO_TYPE_I4: case MONO_TYPE_R4: len = 4; break; case MONO_TYPE_U8: case MONO_TYPE_I8: len = 8; break; case MONO_TYPE_R8: len = 8; #ifdef ARM_FPU_FPA #if G_BYTE_ORDER == G_LITTLE_ENDIAN fpa_p = (guint32*)box_val; fpa_double [0] = fpa_p [1]; fpa_double [1] = fpa_p [0]; box_val = (char*)fpa_double; #endif #endif break; case MONO_TYPE_VALUETYPE: { MonoClass *klass = val->vtable->klass; if (klass->enumtype) { *ret_type = mono_class_enum_basetype (klass)->type; goto handle_enum; } else if (mono_is_corlib_image (klass->image) && strcmp (klass->name_space, "System") == 0 && strcmp (klass->name, "DateTime") == 0) { len = 8; } else g_error ("we can't encode valuetypes, we should have never reached this line"); break; } case MONO_TYPE_CLASS: break; case MONO_TYPE_STRING: { MonoString *str = 
(MonoString*)val; /* there is no signature */ len = str->length * 2; mono_metadata_encode_value (len, b, &b); #if G_BYTE_ORDER != G_LITTLE_ENDIAN { char *swapped = g_malloc (2 * mono_string_length (str)); const char *p = (const char*)mono_string_chars (str); swap_with_size (swapped, p, 2, mono_string_length (str)); idx = add_to_blob_cached (assembly, blob_size, b-blob_size, swapped, len); g_free (swapped); } #else idx = add_to_blob_cached (assembly, blob_size, b-blob_size, (char*)mono_string_chars (str), len); #endif g_free (buf); return idx; } case MONO_TYPE_GENERICINST: *ret_type = val->vtable->klass->generic_class->container_class->byval_arg.type; goto handle_enum; default: g_error ("we don't encode constant type 0x%02x yet", *ret_type); } /* there is no signature */ mono_metadata_encode_value (len, b, &b); #if G_BYTE_ORDER != G_LITTLE_ENDIAN idx = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size); swap_with_size (blob_size, box_val, len, 1); mono_image_add_stream_data (&assembly->blob, blob_size, len); #else idx = add_to_blob_cached (assembly, blob_size, b-blob_size, box_val, len); #endif g_free (buf); return idx; } static guint32 encode_marshal_blob (MonoDynamicImage *assembly, MonoReflectionMarshal *minfo) { char *str; SigBuffer buf; guint32 idx, len; sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, minfo->type); switch (minfo->type) { case MONO_NATIVE_BYVALTSTR: case MONO_NATIVE_BYVALARRAY: sigbuffer_add_value (&buf, minfo->count); break; case MONO_NATIVE_LPARRAY: if (minfo->eltype || minfo->has_size) { sigbuffer_add_value (&buf, minfo->eltype); if (minfo->has_size) { sigbuffer_add_value (&buf, minfo->param_num != -1? minfo->param_num: 0); sigbuffer_add_value (&buf, minfo->count != -1? minfo->count: 0); /* LAMESPEC: ElemMult is undocumented */ sigbuffer_add_value (&buf, minfo->param_num != -1? 
1: 0);
            }
        }
        break;
    case MONO_NATIVE_SAFEARRAY:
        if (minfo->eltype)
            sigbuffer_add_value (&buf, minfo->eltype);
        break;
    case MONO_NATIVE_CUSTOM:
        /* the four CustomMarshaler blob fields: guid, native type name,
         * marshaler type name, cookie — each a length-prefixed string */
        if (minfo->guid) {
            str = mono_string_to_utf8 (minfo->guid);
            len = strlen (str);
            sigbuffer_add_value (&buf, len);
            sigbuffer_add_mem (&buf, str, len);
            g_free (str);
        } else {
            sigbuffer_add_value (&buf, 0);
        }
        /* native type name */
        sigbuffer_add_value (&buf, 0);
        /* custom marshaler type name */
        if (minfo->marshaltype || minfo->marshaltyperef) {
            if (minfo->marshaltyperef)
                str = type_get_fully_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)minfo->marshaltyperef));
            else
                str = mono_string_to_utf8 (minfo->marshaltype);
            len = strlen (str);
            sigbuffer_add_value (&buf, len);
            sigbuffer_add_mem (&buf, str, len);
            g_free (str);
        } else {
            /* FIXME: Actually a bug, since this field is required.  Punting for now ... */
            sigbuffer_add_value (&buf, 0);
        }
        if (minfo->mcookie) {
            str = mono_string_to_utf8 (minfo->mcookie);
            len = strlen (str);
            sigbuffer_add_value (&buf, len);
            sigbuffer_add_mem (&buf, str, len);
            g_free (str);
        } else {
            sigbuffer_add_value (&buf, 0);
        }
        break;
    default:
        break;
    }
    idx = sigbuffer_add_to_blob_cached (assembly, &buf);
    sigbuffer_free (&buf);
    return idx;
}

/*
 * mono_image_get_field_info:
 *
 *   Emit all metadata rows for the field described by FB: its Field row,
 * plus optional FieldLayout (explicit offset), Constant (literal default),
 * FieldRVA (initialized data) and FieldMarshal rows.  Also records the
 * field handle -> Field table index mapping in the assembly.
 */
static void
mono_image_get_field_info (MonoReflectionFieldBuilder *fb, MonoDynamicImage *assembly)
{
    MonoDynamicTable *table;
    guint32 *values;

    /* maybe this fixup should be done in the C# code */
    if (fb->attrs & FIELD_ATTRIBUTE_LITERAL)
        fb->attrs |= FIELD_ATTRIBUTE_HAS_DEFAULT;
    table = &assembly->tables [MONO_TABLE_FIELD];
    fb->table_idx = table->next_idx ++;
    g_hash_table_insert (assembly->field_to_table_idx, fb->handle, GUINT_TO_POINTER (fb->table_idx));
    values = table->values + fb->table_idx * MONO_FIELD_SIZE;
    values [MONO_FIELD_NAME] = string_heap_insert_mstring (&assembly->sheap, fb->name);
    values [MONO_FIELD_FLAGS] = fb->attrs;
    values [MONO_FIELD_SIGNATURE] = field_encode_signature (assembly, fb);

    /* explicit layout: offset of -1 means "no explicit offset" */
    if (fb->offset != -1) {
        table = &assembly->tables [MONO_TABLE_FIELDLAYOUT];
        table->rows ++;
        alloc_table (table, table->rows);
        values = table->values + table->rows * MONO_FIELD_LAYOUT_SIZE;
        values [MONO_FIELD_LAYOUT_FIELD] = fb->table_idx;
        values [MONO_FIELD_LAYOUT_OFFSET] = fb->offset;
    }
    if (fb->attrs & FIELD_ATTRIBUTE_LITERAL) {
        guint32 field_type = 0;
        table = &assembly->tables [MONO_TABLE_CONSTANT];
        table->rows ++;
        alloc_table (table, table->rows);
        values = table->values + table->rows * MONO_CONSTANT_SIZE;
        values [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_FIEDDEF | (fb->table_idx << MONO_HASCONSTANT_BITS);
        values [MONO_CONSTANT_VALUE] = encode_constant (assembly, fb->def_value, &field_type);
        values [MONO_CONSTANT_TYPE] = field_type;
        values [MONO_CONSTANT_PADDING] = 0;
    }
    if (fb->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) {
        guint32 rva_idx;
        table = &assembly->tables [MONO_TABLE_FIELDRVA];
        table->rows ++;
        alloc_table (table, table->rows);
        values = table->values + table->rows * MONO_FIELD_RVA_SIZE;
        values [MONO_FIELD_RVA_FIELD] = fb->table_idx;
        /*
         * We store it in the code section because it's simpler for now.
         */
        if (fb->rva_data) {
            /* align larger RVA payloads in the code stream */
            if (mono_array_length (fb->rva_data) >= 10)
                stream_data_align (&assembly->code);
            rva_idx = mono_image_add_stream_data (&assembly->code, mono_array_addr (fb->rva_data, char, 0), mono_array_length (fb->rva_data));
        } else
            rva_idx = mono_image_add_stream_zero (&assembly->code, mono_class_value_size (fb->handle->parent, NULL));
        values [MONO_FIELD_RVA_RVA] = rva_idx + assembly->text_rva;
    }
    if (fb->marshal_info) {
        table = &assembly->tables [MONO_TABLE_FIELDMARSHAL];
        table->rows ++;
        alloc_table (table, table->rows);
        values = table->values + table->rows * MONO_FIELD_MARSHAL_SIZE;
        values [MONO_FIELD_MARSHAL_PARENT] = (fb->table_idx << MONO_HAS_FIELD_MARSHAL_BITS) | MONO_HAS_FIELD_MARSHAL_FIELDSREF;
        values [MONO_FIELD_MARSHAL_NATIVE_TYPE] = encode_marshal_blob (assembly, fb->marshal_info);
    }
}

/*
 * property_encode_signature:
 *
 *   Build the PropertySig blob for FB and return its #Blob heap index.
 * The parameter list is taken from the getter when present, otherwise
 * from the setter (whose last parameter is the property type itself).
 */
static guint32
property_encode_signature (MonoDynamicImage *assembly, MonoReflectionPropertyBuilder *fb)
{
    SigBuffer buf;
    guint32 nparams = 0;
    MonoReflectionMethodBuilder *mb = fb->get_method;
    MonoReflectionMethodBuilder *smb = fb->set_method;
    guint32 idx, i;

    if (mb && mb->parameters)
        nparams = mono_array_length (mb->parameters);
    if (!mb && smb && smb->parameters)
        nparams = mono_array_length (smb->parameters) - 1;
    sigbuffer_init (&buf, 32);
    /* 0x08 = PROPERTY calling convention; 0x28 adds HASTHIS */
    if (fb->call_conv & 0x20)
        sigbuffer_add_byte (&buf, 0x28);
    else
        sigbuffer_add_byte (&buf, 0x08);
    sigbuffer_add_value (&buf, nparams);
    if (mb) {
        encode_reflection_type (assembly, (MonoReflectionType*)mb->rtype, &buf);
        for (i = 0; i < nparams; ++i) {
            MonoReflectionType *pt = mono_array_get (mb->parameters, MonoReflectionType*, i);
            encode_reflection_type (assembly, pt, &buf);
        }
    } else if (smb && smb->parameters) {
        /* the property type is the last param */
        encode_reflection_type (assembly, mono_array_get (smb->parameters, MonoReflectionType*, nparams), &buf);
        for (i = 0; i < nparams; ++i) {
            MonoReflectionType *pt = mono_array_get (smb->parameters, MonoReflectionType*, i);
            encode_reflection_type (assembly, pt, &buf);
        }
    } else {
        encode_reflection_type (assembly, (MonoReflectionType*)fb->type, &buf);
    }

    idx = sigbuffer_add_to_blob_cached (assembly, &buf);
    sigbuffer_free (&buf);
    return idx;
}

/*
 * mono_image_get_property_info:
 *
 *   Emit the Property row for PB plus the MethodSemantics rows linking its
 * getter/setter, and a Constant row when the property has a default value.
 */
static void
mono_image_get_property_info (MonoReflectionPropertyBuilder *pb, MonoDynamicImage *assembly)
{
    MonoDynamicTable *table;
    guint32 *values;
    guint num_methods = 0;
    guint32 semaidx;

    /*
     * we need to set things in the following tables:
     * PROPERTYMAP (info already filled in _get_type_info ())
     * PROPERTY    (rows already preallocated in _get_type_info ())
     * METHOD      (method info already done with the generic method code)
     * METHODSEMANTICS
     * CONSTANT
     */
    table = &assembly->tables [MONO_TABLE_PROPERTY];
    pb->table_idx = table->next_idx ++;
    values = table->values + pb->table_idx * MONO_PROPERTY_SIZE;
    values [MONO_PROPERTY_NAME] = string_heap_insert_mstring (&assembly->sheap, pb->name);
    values [MONO_PROPERTY_FLAGS] = pb->attrs;
    values [MONO_PROPERTY_TYPE] = property_encode_signature (assembly, pb);

    /* FIXME: we still don't handle 'other' methods */
    if (pb->get_method) num_methods ++;
    if (pb->set_method) num_methods ++;

    table = &assembly->tables [MONO_TABLE_METHODSEMANTICS];
    table->rows += num_methods;
    alloc_table (table, table->rows);

    if (pb->get_method) {
        semaidx = table->next_idx ++;
        values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
        values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_GETTER;
        values [MONO_METHOD_SEMA_METHOD] = pb->get_method->table_idx;
        values [MONO_METHOD_SEMA_ASSOCIATION] = (pb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_PROPERTY;
    }
    if (pb->set_method) {
        semaidx = table->next_idx ++;
        values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
        values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_SETTER;
        values [MONO_METHOD_SEMA_METHOD] = pb->set_method->table_idx;
        values [MONO_METHOD_SEMA_ASSOCIATION] = (pb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_PROPERTY;
    }
    if (pb->attrs & PROPERTY_ATTRIBUTE_HAS_DEFAULT) {
        guint32 field_type = 0;
        table =
&assembly->tables [MONO_TABLE_CONSTANT];
        table->rows ++;
        alloc_table (table, table->rows);
        values = table->values + table->rows * MONO_CONSTANT_SIZE;
        values [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_PROPERTY | (pb->table_idx << MONO_HASCONSTANT_BITS);
        values [MONO_CONSTANT_VALUE] = encode_constant (assembly, pb->def_value, &field_type);
        values [MONO_CONSTANT_TYPE] = field_type;
        values [MONO_CONSTANT_PADDING] = 0;
    }
}

/*
 * mono_image_get_event_info:
 *
 *   Emit the Event row for EB plus the MethodSemantics rows linking its
 * add/remove/raise accessor methods.
 */
static void
mono_image_get_event_info (MonoReflectionEventBuilder *eb, MonoDynamicImage *assembly)
{
    MonoDynamicTable *table;
    guint32 *values;
    guint num_methods = 0;
    guint32 semaidx;

    /*
     * we need to set things in the following tables:
     * EVENTMAP (info already filled in _get_type_info ())
     * EVENT    (rows already preallocated in _get_type_info ())
     * METHOD   (method info already done with the generic method code)
     * METHODSEMANTICS
     */
    table = &assembly->tables [MONO_TABLE_EVENT];
    eb->table_idx = table->next_idx ++;
    values = table->values + eb->table_idx * MONO_EVENT_SIZE;
    values [MONO_EVENT_NAME] = string_heap_insert_mstring (&assembly->sheap, eb->name);
    values [MONO_EVENT_FLAGS] = eb->attrs;
    values [MONO_EVENT_TYPE] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (eb->type));

    /*
     * FIXME: we still don't handle 'other' methods
     */
    if (eb->add_method) num_methods ++;
    if (eb->remove_method) num_methods ++;
    if (eb->raise_method) num_methods ++;

    table = &assembly->tables [MONO_TABLE_METHODSEMANTICS];
    table->rows += num_methods;
    alloc_table (table, table->rows);

    if (eb->add_method) {
        semaidx = table->next_idx ++;
        values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
        values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_ADD_ON;
        values [MONO_METHOD_SEMA_METHOD] = eb->add_method->table_idx;
        values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT;
    }
    if (eb->remove_method) {
        semaidx = table->next_idx ++;
        values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
        values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_REMOVE_ON;
        values [MONO_METHOD_SEMA_METHOD] = eb->remove_method->table_idx;
        values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT;
    }
    if (eb->raise_method) {
        semaidx = table->next_idx ++;
        values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
        values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_FIRE;
        values [MONO_METHOD_SEMA_METHOD] = eb->raise_method->table_idx;
        values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT;
    }
}

/*
 * encode_constraints:
 *
 *   Emit GenericParamConstraint rows for GPARAM: one for the base-type
 * constraint (if any) followed by one per interface constraint.  OWNER is
 * the final GenericParam table index the rows refer back to.
 */
static void
encode_constraints (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly)
{
    MonoDynamicTable *table;
    guint32 num_constraints, i;
    guint32 *values;
    guint32 table_idx;

    table = &assembly->tables [MONO_TABLE_GENERICPARAMCONSTRAINT];
    num_constraints = gparam->iface_constraints ?
        mono_array_length (gparam->iface_constraints) : 0;
    table->rows += num_constraints;
    if (gparam->base_type)
        table->rows++;
    alloc_table (table, table->rows);

    if (gparam->base_type) {
        table_idx = table->next_idx ++;
        values = table->values + table_idx * MONO_GENPARCONSTRAINT_SIZE;

        values [MONO_GENPARCONSTRAINT_GENERICPAR] = owner;
        values [MONO_GENPARCONSTRAINT_CONSTRAINT] = mono_image_typedef_or_ref (
            assembly, mono_reflection_type_get_handle (gparam->base_type));
    }

    for (i = 0; i < num_constraints; i++) {
        MonoReflectionType *constraint = mono_array_get (
            gparam->iface_constraints, gpointer, i);

        table_idx = table->next_idx ++;
        values = table->values + table_idx * MONO_GENPARCONSTRAINT_SIZE;

        values [MONO_GENPARCONSTRAINT_GENERICPAR] = owner;
        values [MONO_GENPARCONSTRAINT_CONSTRAINT] = mono_image_typedef_or_ref (
            assembly, mono_reflection_type_get_handle (constraint));
    }
}

/*
 * mono_image_get_generic_param_info:
 *
 *   Queue GPARAM for later emission into the GenericParam table (see
 * write_generic_param_entry ()); emission is deferred because the table
 * must be sorted by owner first.
 */
static void
mono_image_get_generic_param_info (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly)
{
    GenericParamTableEntry *entry;

    /*
     * The GenericParam table must be sorted according to the
     * `owner' field.
     * We need to do this sorting prior to writing the GenericParamConstraint
     * table, since we have to use the final GenericParam table indices there
     * and they must also be sorted.
     */
    entry = g_new0 (GenericParamTableEntry, 1);
    entry->owner = owner;
    /* FIXME: track where gen_params should be freed and remove the GC root as well */
    MOVING_GC_REGISTER (&entry->gparam);
    entry->gparam = gparam;

    g_ptr_array_add (assembly->gen_params, entry);
}

/*
 * write_generic_param_entry:
 *
 *   Emit the (already sorted) GenericParam row for ENTRY, together with its
 * custom attributes and GenericParamConstraint rows.
 */
static void
write_generic_param_entry (MonoDynamicImage *assembly, GenericParamTableEntry *entry)
{
    MonoDynamicTable *table;
    MonoGenericParam *param;
    guint32 *values;
    guint32 table_idx;

    table = &assembly->tables [MONO_TABLE_GENERICPARAM];
    table_idx = table->next_idx ++;
    values = table->values + table_idx * MONO_GENERICPARAM_SIZE;

    param = mono_reflection_type_get_handle ((MonoReflectionType*)entry->gparam)->data.generic_param;

    values [MONO_GENERICPARAM_OWNER] = entry->owner;
    values [MONO_GENERICPARAM_FLAGS] = entry->gparam->attrs;
    values [MONO_GENERICPARAM_NUMBER] = mono_generic_param_num (param);
    values [MONO_GENERICPARAM_NAME] = string_heap_insert (&assembly->sheap, mono_generic_param_info (param)->name);

    mono_image_add_cattrs (assembly, table_idx, MONO_CUSTOM_ATTR_GENERICPAR, entry->gparam->cattrs);

    encode_constraints (entry->gparam, table_idx, assembly);
}

/*
 * resolution_scope_from_image:
 *
 *   Return a ResolutionScope coded token for IMAGE, creating a ModuleRef
 * row (same dynamic assembly) or an AssemblyRef row (any other assembly)
 * on first use.  Results are cached in assembly->handleref.
 */
static guint32
resolution_scope_from_image (MonoDynamicImage *assembly, MonoImage *image)
{
    MonoDynamicTable *table;
    guint32 token;
    guint32 *values;
    guint32 cols [MONO_ASSEMBLY_SIZE];
    const char *pubkey;
    guint32 publen;

    if ((token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, image))))
        return token;

    if (image->assembly->dynamic && (image->assembly == assembly->image.assembly)) {
        /* another module of this same dynamic assembly: ModuleRef */
        table = &assembly->tables [MONO_TABLE_MODULEREF];
        token = table->next_idx ++;
        table->rows ++;
        alloc_table (table, table->rows);
        values = table->values + token * MONO_MODULEREF_SIZE;
        values [MONO_MODULEREF_NAME] = string_heap_insert (&assembly->sheap, image->module_name);

        token <<= MONO_RESOLTION_SCOPE_BITS;
        token |= MONO_RESOLTION_SCOPE_MODULEREF;
        g_hash_table_insert (assembly->handleref, image, GUINT_TO_POINTER (token));

        return token;
    }

    if (image->assembly->dynamic)
        /* FIXME: */
        memset (cols, 0, sizeof (cols));
    else {
        /* image->assembly->image is the manifest module */
        image = image->assembly->image;
        mono_metadata_decode_row (&image->tables [MONO_TABLE_ASSEMBLY], 0, cols, MONO_ASSEMBLY_SIZE);
    }

    table = &assembly->tables [MONO_TABLE_ASSEMBLYREF];
    token = table->next_idx ++;
    table->rows ++;
    alloc_table (table, table->rows);
    values = table->values + token * MONO_ASSEMBLYREF_SIZE;
    values [MONO_ASSEMBLYREF_NAME] = string_heap_insert (&assembly->sheap, image->assembly_name);
    values [MONO_ASSEMBLYREF_MAJOR_VERSION] = cols [MONO_ASSEMBLY_MAJOR_VERSION];
    values [MONO_ASSEMBLYREF_MINOR_VERSION] = cols [MONO_ASSEMBLY_MINOR_VERSION];
    values [MONO_ASSEMBLYREF_BUILD_NUMBER] = cols [MONO_ASSEMBLY_BUILD_NUMBER];
    values [MONO_ASSEMBLYREF_REV_NUMBER] = cols [MONO_ASSEMBLY_REV_NUMBER];
    values [MONO_ASSEMBLYREF_FLAGS] = 0;
    values [MONO_ASSEMBLYREF_CULTURE] = 0;
    values [MONO_ASSEMBLYREF_HASH_VALUE] = 0;

    if (strcmp ("", image->assembly->aname.culture)) {
        values [MONO_ASSEMBLYREF_CULTURE] = string_heap_insert (&assembly->sheap,
                image->assembly->aname.culture);
    }

    if ((pubkey = mono_image_get_public_key (image, &publen))) {
        guchar pubtoken [9];
        /* first byte is the token length (8), followed by the token itself */
        pubtoken [0] = 8;
        mono_digest_get_public_token (pubtoken + 1, (guchar*)pubkey, publen);
        values [MONO_ASSEMBLYREF_PUBLIC_KEY] = mono_image_add_stream_data (&assembly->blob, (char*)pubtoken, 9);
    } else {
        values [MONO_ASSEMBLYREF_PUBLIC_KEY] = 0;
    }
    token <<= MONO_RESOLTION_SCOPE_BITS;
    token |= MONO_RESOLTION_SCOPE_ASSEMBLYREF;
    g_hash_table_insert (assembly->handleref, image, GUINT_TO_POINTER (token));
    return token;
}

/*
 * create_typespec:
 *
 *   Return a TypeDefOrRef coded token with a TypeSpec row for TYPE, or 0
 * when TYPE does not need a TypeSpec.  Tokens are cached per type.
 */
static guint32
create_typespec (MonoDynamicImage *assembly, MonoType *type)
{
    MonoDynamicTable *table;
    guint32 *values;
    guint32 token;
    SigBuffer buf;

    if ((token = GPOINTER_TO_UINT (g_hash_table_lookup
(assembly->typespec, type)))) return token; sigbuffer_init (&buf, 32); switch (type->type) { case MONO_TYPE_FNPTR: case MONO_TYPE_PTR: case MONO_TYPE_SZARRAY: case MONO_TYPE_ARRAY: case MONO_TYPE_VAR: case MONO_TYPE_MVAR: case MONO_TYPE_GENERICINST: encode_type (assembly, type, &buf); break; case MONO_TYPE_CLASS: case MONO_TYPE_VALUETYPE: { MonoClass *k = mono_class_from_mono_type (type); if (!k || !k->generic_container) { sigbuffer_free (&buf); return 0; } encode_type (assembly, type, &buf); break; } default: sigbuffer_free (&buf); return 0; } table = &assembly->tables [MONO_TABLE_TYPESPEC]; if (assembly->save) { token = sigbuffer_add_to_blob_cached (assembly, &buf); alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_TYPESPEC_SIZE; values [MONO_TYPESPEC_SIGNATURE] = token; } sigbuffer_free (&buf); token = MONO_TYPEDEFORREF_TYPESPEC | (table->next_idx << MONO_TYPEDEFORREF_BITS); g_hash_table_insert (assembly->typespec, type, GUINT_TO_POINTER(token)); table->next_idx ++; return token; } static guint32 mono_image_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec) { MonoDynamicTable *table; guint32 *values; guint32 token, scope, enclosing; MonoClass *klass; /* if the type requires a typespec, we must try that first*/ if (try_typespec && (token = create_typespec (assembly, type))) return token; token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typeref, type)); if (token) return token; klass = mono_class_from_mono_type (type); if (!klass) klass = mono_class_from_mono_type (type); /* * If it's in the same module and not a generic type parameter: */ if ((klass->image == &assembly->image) && (type->type != MONO_TYPE_VAR) && (type->type != MONO_TYPE_MVAR)) { MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass); token = MONO_TYPEDEFORREF_TYPEDEF | (tb->table_idx << MONO_TYPEDEFORREF_BITS); mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), mono_class_get_ref_info 
(klass)); return token; } if (klass->nested_in) { enclosing = mono_image_typedef_or_ref_full (assembly, &klass->nested_in->byval_arg, FALSE); /* get the typeref idx of the enclosing type */ enclosing >>= MONO_TYPEDEFORREF_BITS; scope = (enclosing << MONO_RESOLTION_SCOPE_BITS) | MONO_RESOLTION_SCOPE_TYPEREF; } else { scope = resolution_scope_from_image (assembly, klass->image); } table = &assembly->tables [MONO_TABLE_TYPEREF]; if (assembly->save) { alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_TYPEREF_SIZE; values [MONO_TYPEREF_SCOPE] = scope; values [MONO_TYPEREF_NAME] = string_heap_insert (&assembly->sheap, klass->name); values [MONO_TYPEREF_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space); } token = MONO_TYPEDEFORREF_TYPEREF | (table->next_idx << MONO_TYPEDEFORREF_BITS); /* typeref */ g_hash_table_insert (assembly->typeref, type, GUINT_TO_POINTER(token)); table->next_idx ++; mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), mono_class_get_ref_info (klass)); return token; } /* * Despite the name, we handle also TypeSpec (with the above helper). 
 */
static guint32
mono_image_typedef_or_ref (MonoDynamicImage *assembly, MonoType *type)
{
    /* convenience wrapper that always tries the TypeSpec encoding first */
    return mono_image_typedef_or_ref_full (assembly, type, TRUE);
}

#ifndef DISABLE_REFLECTION_EMIT
/*
 * mono_image_add_memberef_row:
 *
 *   Append a MemberRef row (PARENT is a TypeDefOrRef coded token, SIG a
 * #Blob index) and return the resulting MONO_TOKEN_MEMBER_REF token.
 */
static guint32
mono_image_add_memberef_row (MonoDynamicImage *assembly, guint32 parent, const char *name, guint32 sig)
{
    MonoDynamicTable *table;
    guint32 *values;
    guint32 token, pclass;

    switch (parent & MONO_TYPEDEFORREF_MASK) {
    case MONO_TYPEDEFORREF_TYPEREF:
        pclass = MONO_MEMBERREF_PARENT_TYPEREF;
        break;
    case MONO_TYPEDEFORREF_TYPESPEC:
        pclass = MONO_MEMBERREF_PARENT_TYPESPEC;
        break;
    case MONO_TYPEDEFORREF_TYPEDEF:
        pclass = MONO_MEMBERREF_PARENT_TYPEDEF;
        break;
    default:
        g_warning ("unknown typeref or def token 0x%08x for %s", parent, name);
        return 0;
    }
    /* extract the index */
    parent >>= MONO_TYPEDEFORREF_BITS;

    table = &assembly->tables [MONO_TABLE_MEMBERREF];

    if (assembly->save) {
        alloc_table (table, table->rows + 1);
        values = table->values + table->next_idx * MONO_MEMBERREF_SIZE;
        values [MONO_MEMBERREF_CLASS] = pclass | (parent << MONO_MEMBERREF_PARENT_BITS);
        values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name);
        values [MONO_MEMBERREF_SIGNATURE] = sig;
    }

    token = MONO_TOKEN_MEMBER_REF | table->next_idx;
    table->next_idx ++;

    return token;
}

/*
 * Insert a memberef row into the metadata: the token that points to the memberref
 * is returned. Caching is done in the caller (mono_image_get_methodref_token() or
 * mono_image_get_fieldref_token()).
 * The sig param is an index to an already built signature.
 */
static guint32
mono_image_get_memberref_token (MonoDynamicImage *assembly, MonoType *type, const char *name, guint32 sig)
{
    guint32 parent = mono_image_typedef_or_ref (assembly, type);
    return mono_image_add_memberef_row (assembly, parent, name, sig);
}


/*
 * mono_image_get_methodref_token:
 *
 *   Return a MemberRef token for METHOD (or a MethodSpec token when
 * CREATE_TYPESPEC is requested for a generic method defined in another
 * image).  Results are cached in assembly->handleref; the MethodSpec
 * variant is cached under the method pointer + 1 key.
 */
static guint32
mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod *method, gboolean create_typespec)
{
    guint32 token;
    MonoMethodSignature *sig;

    create_typespec = create_typespec && method->is_generic && method->klass->image != &assembly->image;

    if (create_typespec) {
        token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, GUINT_TO_POINTER (GPOINTER_TO_UINT (method) + 1)));
        if (token)
            return token;
    }

    token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method));
    if (token && !create_typespec)
        return token;

    g_assert (!method->is_inflated);
    if (!token) {
        /*
         * A methodref signature can't contain an unmanaged calling convention.
         */
        sig = mono_metadata_signature_dup (mono_method_signature (method));
        if ((sig->call_convention != MONO_CALL_DEFAULT) && (sig->call_convention != MONO_CALL_VARARG))
            sig->call_convention = MONO_CALL_DEFAULT;
        token = mono_image_get_memberref_token (assembly, &method->klass->byval_arg,
            method->name,  method_encode_signature (assembly, sig));
        g_free (sig);
        g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token));
    }

    if (create_typespec) {
        MonoDynamicTable *table = &assembly->tables [MONO_TABLE_METHODSPEC];
        g_assert (mono_metadata_token_table (token) == MONO_TABLE_MEMBERREF);
        token = (mono_metadata_token_index (token) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;

        if (assembly->save) {
            guint32 *values;

            alloc_table (table, table->rows + 1);
            values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
            values [MONO_METHODSPEC_METHOD] = token;
            values [MONO_METHODSPEC_SIGNATURE] = encode_generic_method_sig (assembly, &mono_method_get_generic_container (method)->context);
        }

        token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
        table->next_idx ++;
        /* methodspec and memberref tokens are different */
        g_hash_table_insert (assembly->handleref, GUINT_TO_POINTER (GPOINTER_TO_UINT (method) + 1), GUINT_TO_POINTER (token));
        return token;
    }

    return token;
}

/*
 * mono_image_get_methodref_token_for_methodbuilder:
 *
 *   Return (and cache) a MemberRef token for the not-yet-created method
 * described by METHOD, encoding its signature from the builder data.
 */
static guint32
mono_image_get_methodref_token_for_methodbuilder (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *method)
{
    guint32 token, parent, sig;
    ReflectionMethodBuilder rmb;
    char *name;
    MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)method->type;

    token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method));
    if (token)
        return token;

    name = mono_string_to_utf8 (method->name);
    reflection_methodbuilder_from_method_builder (&rmb, method);

    /*
     * A methodref signature can't contain an unmanaged calling convention.
     * Since some flags are encoded as part of call_conv, we need to check against it.
     */
    if ((rmb.call_conv & ~0x60) != MONO_CALL_DEFAULT && (rmb.call_conv & ~0x60) != MONO_CALL_VARARG)
        rmb.call_conv = (rmb.call_conv & 0x60) | MONO_CALL_DEFAULT;

    sig = method_builder_encode_signature (assembly, &rmb);

    if (tb->generic_params)
        parent = create_generic_typespec (assembly, tb);
    else
        parent = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)rmb.type));

    token = mono_image_add_memberef_row (assembly, parent, name, sig);

    g_free (name);
    g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token));
    return token;
}

/*
 * mono_image_get_varargs_method_token:
 *
 *   Append a MemberRef row whose parent is ORIGINAL (an existing coded
 * parent) with the given NAME and vararg call-site signature SIG.
 */
static guint32
mono_image_get_varargs_method_token (MonoDynamicImage *assembly, guint32 original,
                     const gchar *name, guint32 sig)
{
    MonoDynamicTable *table;
    guint32 token;
    guint32 *values;

    table = &assembly->tables [MONO_TABLE_MEMBERREF];

    if (assembly->save) {
        alloc_table (table, table->rows + 1);
        values = table->values + table->next_idx * MONO_MEMBERREF_SIZE;
        values [MONO_MEMBERREF_CLASS] = original;
        values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name);
        values [MONO_MEMBERREF_SIGNATURE] = sig;
    }

    token = MONO_TOKEN_MEMBER_REF | table->next_idx;
    table->next_idx ++;

    return token;
}

/*
 * encode_generic_method_definition_sig:
 *
 *   Build the MethodSpec instantiation blob for the generic method
 * definition MB, instantiated with its own method type parameters
 * (MVAR 0..n-1).  Returns 0 when the image is not being saved.
 */
static guint32
encode_generic_method_definition_sig (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb)
{
    SigBuffer buf;
    int i;
    guint32 nparams = mono_array_length (mb->generic_params);
    guint32 idx;

    if (!assembly->save)
        return 0;

    sigbuffer_init (&buf, 32);

    /* 0xa is the GENERICINST calling-convention byte for MethodSpec blobs */
    sigbuffer_add_value (&buf, 0xa);
    sigbuffer_add_value (&buf, nparams);

    for (i = 0; i < nparams; i++) {
        sigbuffer_add_value (&buf, MONO_TYPE_MVAR);
        sigbuffer_add_value (&buf, i);
    }

    idx = sigbuffer_add_to_blob_cached (assembly, &buf);
    sigbuffer_free (&buf);
    return idx;
}

/*
 * mono_image_get_methodspec_token_for_generic_method_definition:
 *
 *   Return (and cache) a MethodSpec token for the generic method
 * definition MB, pointing at its MemberRef/MethodDef row.
 */
static guint32
mono_image_get_methodspec_token_for_generic_method_definition (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb)
{
    MonoDynamicTable *table;
    guint32 *values;
    guint32 token, mtoken = 0;

    token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->methodspec, mb));
    if (token)
        return token;

    table = &assembly->tables [MONO_TABLE_METHODSPEC];

    mtoken = mono_image_get_methodref_token_for_methodbuilder (assembly, mb);
    switch (mono_metadata_token_table (mtoken)) {
    case MONO_TABLE_MEMBERREF:
        mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;
        break;
    case MONO_TABLE_METHOD:
        mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODDEF;
        break;
    default:
        g_assert_not_reached ();
    }

    if (assembly->save) {
        alloc_table (table, table->rows + 1);
        values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
        values [MONO_METHODSPEC_METHOD] = mtoken;
        values [MONO_METHODSPEC_SIGNATURE] = encode_generic_method_definition_sig (assembly, mb);
    }

    token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
    table->next_idx ++;

    mono_g_hash_table_insert (assembly->methodspec, mb, GUINT_TO_POINTER(token));
    return token;
}

/*
 * mono_image_get_methodbuilder_token:
 *
 *   Return a token referring to the method described by MB: a MethodSpec
 * for generic methods (when CREATE_METHODSPEC), otherwise a cached
 * MemberRef token.
 */
static guint32
mono_image_get_methodbuilder_token (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb, gboolean create_methodspec)
{
    guint32 token;

    if (mb->generic_params &&
create_methodspec)
        return mono_image_get_methodspec_token_for_generic_method_definition (assembly, mb);

    token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, mb));
    if (token)
        return token;

    token = mono_image_get_methodref_token_for_methodbuilder (assembly, mb);
    mono_g_hash_table_insert (assembly->handleref_managed, mb, GUINT_TO_POINTER(token));
    return token;
}

/*
 * mono_image_get_ctorbuilder_token:
 *
 *   Return (and cache) a MemberRef token for the constructor builder MB,
 * whose declaring type must be a generic TypeBuilder (a generic typespec
 * is used as the parent).
 */
static guint32
mono_image_get_ctorbuilder_token (MonoDynamicImage *assembly, MonoReflectionCtorBuilder *mb)
{
    guint32 token, parent, sig;
    ReflectionMethodBuilder rmb;
    char *name;
    MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)mb->type;

    token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, mb));
    if (token)
        return token;

    g_assert (tb->generic_params);

    reflection_methodbuilder_from_ctor_builder (&rmb, mb);

    parent = create_generic_typespec (assembly, tb);
    name = mono_string_to_utf8 (rmb.name);
    sig = method_builder_encode_signature (assembly, &rmb);

    token = mono_image_add_memberef_row (assembly, parent, name, sig);

    g_free (name);
    mono_g_hash_table_insert (assembly->handleref_managed, mb, GUINT_TO_POINTER(token));
    return token;
}
#endif

/*
 * is_field_on_inst:
 *
 *   TRUE when FIELD belongs to a dynamic generic instance that carries its
 * own (non-inflated) field array.
 */
static gboolean
is_field_on_inst (MonoClassField *field)
{
    return (field->parent->generic_class && field->parent->generic_class->is_dynamic && ((MonoDynamicGenericClass*)field->parent->generic_class)->fields);
}

/*
 * If FIELD is a field of a MonoDynamicGenericClass, return its non-inflated type.
 */
static MonoType*
get_field_on_inst_generic_type (MonoClassField *field)
{
    MonoClass *class, *gtd;
    MonoDynamicGenericClass *dgclass;
    int field_index;

    g_assert (is_field_on_inst (field));

    dgclass = (MonoDynamicGenericClass*)field->parent->generic_class;

    if (field >= dgclass->fields && field - dgclass->fields < dgclass->count_fields) {
        field_index = field - dgclass->fields;
        return dgclass->field_generic_types [field_index];		
    }

    class = field->parent;
    gtd = class->generic_class->container_class;

    if (field >= class->fields && field - class->fields < class->field.count) {
        field_index = field - class->fields;
        return gtd->fields [field_index].type;
    }

    g_assert_not_reached ();
    return 0;
}

#ifndef DISABLE_REFLECTION_EMIT
/*
 * mono_image_get_fieldref_token:
 *
 *   Return (and cache under the managed object F) a MemberRef token for
 * FIELD, using the declaring type's non-inflated field type when the
 * parent is a (dynamic) generic instance.
 */
static guint32
mono_image_get_fieldref_token (MonoDynamicImage *assembly, MonoObject *f, MonoClassField *field)
{
    MonoType *type;
    guint32 token;

    g_assert (field);
    g_assert (field->parent);

    token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, f));
    if (token)
        return token;

    if (field->parent->generic_class && field->parent->generic_class->container_class && field->parent->generic_class->container_class->fields) {
        int index = field - field->parent->fields;
        type = field->parent->generic_class->container_class->fields [index].type;
    } else {
        if (is_field_on_inst (field))
            type = get_field_on_inst_generic_type (field);
        else
            type = field->type;
    }
    token = mono_image_get_memberref_token (assembly, &field->parent->byval_arg,
                        mono_field_get_name (field),
                        fieldref_encode_signature (assembly, field->parent->image, type));
    mono_g_hash_table_insert (assembly->handleref_managed, f, GUINT_TO_POINTER(token));
    return token;
}

/*
 * mono_image_get_field_on_inst_token:
 *
 *   Return (and cache) a MemberRef token for the field-on-generic-instance
 * wrapper F, handling both FieldBuilder and runtime MonoField cases.
 */
static guint32
mono_image_get_field_on_inst_token (MonoDynamicImage *assembly, MonoReflectionFieldOnTypeBuilderInst *f)
{
    guint32 token;
    MonoClass *klass;
    MonoGenericClass *gclass;
    MonoDynamicGenericClass *dgclass;
    MonoType *type;
    char *name;

    token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, f));
    if (token)
        return token;
    if (is_sre_field_builder (mono_object_class (f->fb))) {
        MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)f->fb;
        type = mono_reflection_type_get_handle ((MonoReflectionType*)f->inst);
        klass = mono_class_from_mono_type (type);
        gclass = type->data.generic_class;
        g_assert (gclass->is_dynamic);
        dgclass = (MonoDynamicGenericClass *) gclass;

        name = mono_string_to_utf8 (fb->name);
        token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, 
                                                field_encode_signature (assembly, fb));
        g_free (name);		
    } else if (is_sr_mono_field (mono_object_class (f->fb))) {
        guint32 sig;
        MonoClassField *field = ((MonoReflectionField *)f->fb)->field;

        type = mono_reflection_type_get_handle ((MonoReflectionType*)f->inst);
        klass = mono_class_from_mono_type (type);

        sig = fieldref_encode_signature (assembly, field->parent->image, field->type);
        token = mono_image_get_memberref_token (assembly, &klass->byval_arg, field->name, sig);
    } else {
        char *name = mono_type_get_full_name (mono_object_class (f->fb));
        g_error ("mono_image_get_field_on_inst_token: don't know how to handle %s", name);
    }

    mono_g_hash_table_insert (assembly->handleref_managed, f, GUINT_TO_POINTER (token));
    return token;
}

/*
 * mono_image_get_ctor_on_inst_token:
 *
 *   Return (and cache) a MemberRef token for the ctor-on-generic-instance
 * wrapper C.  CREATE_METHODSPEC is ignored: a ctor cannot be generic.
 */
static guint32
mono_image_get_ctor_on_inst_token (MonoDynamicImage *assembly, MonoReflectionCtorOnTypeBuilderInst *c, gboolean create_methodspec)
{
    guint32 sig, token;
    MonoClass *klass;
    MonoGenericClass *gclass;
    MonoType *type;

    /* A ctor cannot be a generic method, so we can ignore create_methodspec */

    token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, c));
    if (token)
        return token;

    if (is_sre_ctor_builder (mono_object_class (c->cb))) {
        MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder *)c->cb;
        MonoDynamicGenericClass *dgclass;
        ReflectionMethodBuilder rmb;
        char *name;

        type = mono_reflection_type_get_handle ((MonoReflectionType*)c->inst);
        klass = mono_class_from_mono_type (type);

        gclass = type->data.generic_class;
        g_assert
 (gclass->is_dynamic);
        dgclass = (MonoDynamicGenericClass *) gclass;

        reflection_methodbuilder_from_ctor_builder (&rmb, cb);

        name = mono_string_to_utf8 (rmb.name);

        sig = method_builder_encode_signature (assembly, &rmb);

        token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, sig);
        g_free (name);
    } else if (is_sr_mono_cmethod (mono_object_class (c->cb))) {
        MonoMethod *mm = ((MonoReflectionMethod *)c->cb)->method;

        type = mono_reflection_type_get_handle ((MonoReflectionType*)c->inst);
        klass = mono_class_from_mono_type (type);

        sig = method_encode_signature (assembly, mono_method_signature (mm));
        token = mono_image_get_memberref_token (assembly, &klass->byval_arg, mm->name, sig);
    } else {
        char *name = mono_type_get_full_name (mono_object_class (c->cb));
        g_error ("mono_image_get_method_on_inst_token: don't know how to handle %s", name);
    }


    mono_g_hash_table_insert (assembly->handleref_managed, c, GUINT_TO_POINTER (token));
    return token;
}

/*
 * mono_reflection_method_on_tb_inst_get_handle:
 *
 *   Resolve the MethodOnTypeBuilderInst wrapper M into a runtime
 * MonoMethod*, additionally inflating it with M's method type arguments
 * when present.
 */
static MonoMethod*
mono_reflection_method_on_tb_inst_get_handle (MonoReflectionMethodOnTypeBuilderInst *m)
{
    MonoClass *klass;
    MonoGenericContext tmp_context;
    MonoType **type_argv;
    MonoGenericInst *ginst;
    MonoMethod *method, *inflated;
    int count, i;

    init_type_builder_generics ((MonoObject*)m->inst);

    method = inflate_method (m->inst, (MonoObject*)m->mb);
    klass = method->klass;

    if (m->method_args == NULL)
        return method;

    if (method->is_inflated)
        method = ((MonoMethodInflated *) method)->declaring;

    count = mono_array_length (m->method_args);

    type_argv = g_new0 (MonoType *, count);
    for (i = 0; i < count; i++) {
        MonoReflectionType *garg = mono_array_get (m->method_args, gpointer, i);
        type_argv [i] = mono_reflection_type_get_handle (garg);
    }
    ginst = mono_metadata_get_generic_inst (count, type_argv);
    g_free (type_argv);

    tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL;
    tmp_context.method_inst = ginst;

    inflated = mono_class_inflate_generic_method (method, &tmp_context);
    return inflated;
}

/*
 * mono_image_get_method_on_inst_token:
 *
 *   Return a token for the method-on-generic-instance wrapper M: a
 * MethodSpec (or inflated-method token) when M carries method type
 * arguments, otherwise a cached MemberRef token.
 */
static guint32
mono_image_get_method_on_inst_token (MonoDynamicImage *assembly, MonoReflectionMethodOnTypeBuilderInst *m, gboolean create_methodspec)
{
    guint32 sig, token = 0;
    MonoType *type;
    MonoClass *klass;

    if (m->method_args) {
        MonoMethod *inflated;

        inflated = mono_reflection_method_on_tb_inst_get_handle (m);
        if (create_methodspec)
            token = mono_image_get_methodspec_token (assembly, inflated);
        else
            token = mono_image_get_inflated_method_token (assembly, inflated);
        return token;
    }

    token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, m));
    if (token)
        return token;

    if (is_sre_method_builder (mono_object_class (m->mb))) {
        MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)m->mb;
        MonoGenericClass *gclass;
        ReflectionMethodBuilder rmb;
        char *name;

        type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst);
        klass = mono_class_from_mono_type (type);
        gclass = type->data.generic_class;
        g_assert (gclass->is_dynamic);

        reflection_methodbuilder_from_method_builder (&rmb, mb);

        name = mono_string_to_utf8 (rmb.name);

        sig = method_builder_encode_signature (assembly, &rmb);

        token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, sig);
        g_free (name);		
    } else if (is_sr_mono_method (mono_object_class (m->mb))) {
        MonoMethod *mm = ((MonoReflectionMethod *)m->mb)->method;

        type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst);
        klass = mono_class_from_mono_type (type);

        sig = method_encode_signature (assembly, mono_method_signature (mm));
        token = mono_image_get_memberref_token (assembly, &klass->byval_arg, mm->name, sig);
    } else {
        char *name = mono_type_get_full_name (mono_object_class (m->mb));
        g_error ("mono_image_get_method_on_inst_token: don't know how to handle %s", name);
    }

    mono_g_hash_table_insert (assembly->handleref_managed, m, GUINT_TO_POINTER (token));
    return token;
}

/*
 * encode_generic_method_sig:
 *
 *   Build the MethodSpec instantiation blob for the method type arguments
 * in CONTEXT and return its #Blob index (0 when not saving).
 */
static guint32
encode_generic_method_sig (MonoDynamicImage *assembly, MonoGenericContext *context)
{
    SigBuffer buf;
    int i;
    guint32 nparams = context->method_inst->type_argc;
    guint32 idx;

    if (!assembly->save)
        return 0;

    sigbuffer_init (&buf, 32);
    /*
     * FIXME: vararg, explicit_this, differenc call_conv values...
     */
    sigbuffer_add_value (&buf, 0xa); /* FIXME FIXME FIXME */
    sigbuffer_add_value (&buf, nparams);

    for (i = 0; i < nparams; i++)
        encode_type (assembly, context->method_inst->type_argv [i], &buf);

    idx = sigbuffer_add_to_blob_cached (assembly, &buf);
    sigbuffer_free (&buf);
    return idx;
}

/*
 * method_encode_methodspec:
 *
 *   Emit a MethodSpec row for the inflated METHOD, referring back to a
 * MemberRef of its generic definition.  When the declaring definition is
 * not generic, the plain MemberRef token is returned instead.
 */
static guint32
method_encode_methodspec (MonoDynamicImage *assembly, MonoMethod *method)
{
    MonoDynamicTable *table;
    guint32 *values;
    guint32 token, mtoken = 0, sig;
    MonoMethodInflated *imethod;
    MonoMethod *declaring;

    table = &assembly->tables [MONO_TABLE_METHODSPEC];

    g_assert (method->is_inflated);
    imethod = (MonoMethodInflated *) method;
    declaring = imethod->declaring;

    sig = method_encode_signature (assembly, mono_method_signature (declaring));
    mtoken = mono_image_get_memberref_token (assembly, &method->klass->byval_arg, declaring->name, sig);

    if (!mono_method_signature (declaring)->generic_param_count)
        return mtoken;

    switch (mono_metadata_token_table (mtoken)) {
    case MONO_TABLE_MEMBERREF:
        mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;
        break;
    case MONO_TABLE_METHOD:
        mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODDEF;
        break;
    default:
        g_assert_not_reached ();
    }

    sig = encode_generic_method_sig (assembly, mono_method_get_context (method));

    if (assembly->save) {
        alloc_table (table, table->rows + 1);
        values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
        values [MONO_METHODSPEC_METHOD] = mtoken;
        values [MONO_METHODSPEC_SIGNATURE] = sig;
    }

    token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
    table->next_idx ++;

    return token;
}
/*
 * mono_image_get_methodspec_token:
 * Cached wrapper around method_encode_methodspec: returns a MethodSpec
 * token for an inflated generic method, or a plain MemberRef token when the
 * declaring method has no generic parameters.  Cache: assembly->handleref.
 */
static guint32 mono_image_get_methodspec_token (MonoDynamicImage *assembly, MonoMethod *method) { MonoMethodInflated *imethod; guint32 token; token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method)); if (token) return token; g_assert (method->is_inflated); imethod = (MonoMethodInflated *) method; if (mono_method_signature (imethod->declaring)->generic_param_count) { token = method_encode_methodspec (assembly, method); } else { guint32 sig = method_encode_signature ( assembly, mono_method_signature (imethod->declaring)); token = mono_image_get_memberref_token ( assembly, &method->klass->byval_arg, method->name, sig); } g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token)); return token; }
/*
 * mono_image_get_inflated_method_token:
 * Returns a MemberRef token for an inflated method, encoded with the
 * declaring method's signature.  Not cached.
 */
static guint32 mono_image_get_inflated_method_token (MonoDynamicImage *assembly, MonoMethod *m) { MonoMethodInflated *imethod = (MonoMethodInflated *) m; guint32 sig, token; sig = method_encode_signature (assembly, mono_method_signature (imethod->declaring)); token = mono_image_get_memberref_token ( assembly, &m->klass->byval_arg, m->name, sig); return token; }
/*
 * create_generic_typespec:
 * Builds a GENERICINST TypeSpec signature for the TypeBuilder of a generic
 * type declaration (the type instantiated over its own generic parameters)
 * and returns the TypeSpec-encoded typedef-or-ref value.  Cached in
 * assembly->typespec.
 */
static guint32 create_generic_typespec (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb) { MonoDynamicTable *table; MonoClass *klass; MonoType *type; guint32 *values; guint32 token; SigBuffer buf; int count, i; /* * We're creating a TypeSpec for the TypeBuilder of a generic type declaration, * ie. what we'd normally use as the generic type in a TypeSpec signature. * Because of this, we must not insert it into the `typeref' hash table. */ type = mono_reflection_type_get_handle ((MonoReflectionType*)tb); token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typespec, type)); if (token) return token; sigbuffer_init (&buf, 32); g_assert (tb->generic_params); klass = mono_class_from_mono_type (type); if (tb->generic_container) mono_reflection_create_generic_class (tb); sigbuffer_add_value (&buf, MONO_TYPE_GENERICINST); g_assert (klass->generic_container); sigbuffer_add_value (&buf, klass->byval_arg.type); sigbuffer_add_value (&buf, mono_image_typedef_or_ref_full (assembly, &klass->byval_arg, FALSE)); count = mono_array_length (tb->generic_params); sigbuffer_add_value (&buf, count); for (i = 0; i < count; i++) { MonoReflectionGenericParam *gparam; gparam = mono_array_get (tb->generic_params, MonoReflectionGenericParam *, i); encode_type (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)gparam), &buf); } table = &assembly->tables [MONO_TABLE_TYPESPEC]; if (assembly->save) { token = sigbuffer_add_to_blob_cached (assembly, &buf); alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_TYPESPEC_SIZE; values [MONO_TYPESPEC_SIGNATURE] = token; } sigbuffer_free (&buf); token = MONO_TYPEDEFORREF_TYPESPEC | (table->next_idx << MONO_TYPEDEFORREF_BITS); g_hash_table_insert (assembly->typespec, type, GUINT_TO_POINTER(token)); table->next_idx ++; return token; } /* * Return a copy of TYPE, adding the custom modifiers in MODREQ and MODOPT.
*/
/*
 * add_custom_modifiers:
 * Returns a freshly allocated copy of TYPE with the modreq/modopt custom
 * modifiers appended after the MonoType header; caller owns the result.
 * NOTE(review): only MONO_SIZEOF_TYPE header bytes are copied, so any
 * modifiers already present on TYPE are dropped — confirm that is intended.
 */
static MonoType* add_custom_modifiers (MonoDynamicImage *assembly, MonoType *type, MonoArray *modreq, MonoArray *modopt) { int i, count, len, pos; MonoType *t; count = 0; if (modreq) count += mono_array_length (modreq); if (modopt) count += mono_array_length (modopt); if (count == 0) return mono_metadata_type_dup (NULL, type); len = MONO_SIZEOF_TYPE + ((gint32)count) * sizeof (MonoCustomMod); t = g_malloc (len); memcpy (t, type, MONO_SIZEOF_TYPE); t->num_mods = count; pos = 0; if (modreq) { for (i = 0; i < mono_array_length (modreq); ++i) { MonoType *mod = mono_type_array_get_and_resolve (modreq, i); t->modifiers [pos].required = 1; t->modifiers [pos].token = mono_image_typedef_or_ref (assembly, mod); pos ++; } } if (modopt) { for (i = 0; i < mono_array_length (modopt); ++i) { MonoType *mod = mono_type_array_get_and_resolve (modopt, i); t->modifiers [pos].required = 0; t->modifiers [pos].token = mono_image_typedef_or_ref (assembly, mod); pos ++; } } return t; }
/*
 * init_type_builder_generics:
 * If `type` is a TypeBuilder with a generic container, make sure its
 * generic class has been created; no-op for any other object kind.
 */
static void init_type_builder_generics (MonoObject *type) { MonoReflectionTypeBuilder *tb; if (!is_sre_type_builder(mono_object_class (type))) return; tb = (MonoReflectionTypeBuilder *)type; if (tb && tb->generic_container) mono_reflection_create_generic_class (tb); }
/*
 * mono_image_get_generic_field_token:
 * Emits a MemberRef row (parented by a generic TypeSpec) for a FieldBuilder
 * declared on a generic TypeBuilder; cached in assembly->handleref_managed.
 */
static guint32 mono_image_get_generic_field_token (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb) { MonoDynamicTable *table; MonoClass *klass; MonoType *custom = NULL, *type; guint32 *values; guint32 token, pclass, parent, sig; gchar *name; token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, fb)); if (token) return token; klass = mono_class_from_mono_type (mono_reflection_type_get_handle (fb->typeb)); name = mono_string_to_utf8 (fb->name); /*FIXME this is one more layer of ugliness due how types are created.*/ init_type_builder_generics (fb->type); /* fb->type does not include the custom modifiers */ /* FIXME: We should do this in one place when a fieldbuilder is created */ type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type); if (fb->modreq || fb->modopt) type = custom = add_custom_modifiers (assembly, type, fb->modreq, fb->modopt); sig = fieldref_encode_signature (assembly, NULL, type); g_free (custom); parent = create_generic_typespec (assembly, (MonoReflectionTypeBuilder *) fb->typeb); g_assert ((parent & MONO_TYPEDEFORREF_MASK) == MONO_TYPEDEFORREF_TYPESPEC); pclass = MONO_MEMBERREF_PARENT_TYPESPEC; parent >>= MONO_TYPEDEFORREF_BITS; table = &assembly->tables [MONO_TABLE_MEMBERREF]; if (assembly->save) { alloc_table (table, table->rows + 1); values = table->values + table->next_idx * MONO_MEMBERREF_SIZE; values [MONO_MEMBERREF_CLASS] = pclass | (parent << MONO_MEMBERREF_PARENT_BITS); values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name); values [MONO_MEMBERREF_SIGNATURE] = sig; } token = MONO_TOKEN_MEMBER_REF | table->next_idx; table->next_idx ++; mono_g_hash_table_insert (assembly->handleref_managed, fb, GUINT_TO_POINTER(token)); g_free (name); return token; }
/*
 * mono_reflection_encode_sighelper:
 * Encodes a SignatureHelper (only HELPER_METHOD, type == 2) into a
 * signature blob: calling-convention byte, parameter count, return type,
 * then each parameter with its optional modreq/modopt sets.  Returns the
 * blob index, or 0 when the image is not being saved.
 * NOTE(review): `helper->call_conv &= 0x20` under the explicit_this check
 * looks like a mask where a set (`|=`) may have been intended; verify
 * against ECMA-335 II.23.2.3 before touching.
 */
static guint32 mono_reflection_encode_sighelper (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper) { SigBuffer buf; guint32 nargs; guint32 size; guint32 i, idx; if (!assembly->save) return 0; /* FIXME: this means SignatureHelper.SignatureHelpType.HELPER_METHOD */ g_assert (helper->type == 2); if (helper->arguments) nargs = mono_array_length (helper->arguments); else nargs = 0; size = 10 + (nargs * 10); sigbuffer_init (&buf, 32); /* Encode calling convention */ /* Change Any to Standard */ if ((helper->call_conv & 0x03) == 0x03) helper->call_conv = 0x01; /* explicit_this implies has_this */ if (helper->call_conv & 0x40) helper->call_conv &= 0x20; if (helper->call_conv == 0) { /* Unmanaged */ idx = helper->unmanaged_call_conv - 1; } else { /* Managed */ idx = helper->call_conv & 0x60; /* has_this + explicit_this */ if (helper->call_conv & 0x02) /* varargs */ idx += 0x05; } sigbuffer_add_byte (&buf, idx); sigbuffer_add_value (&buf, nargs); encode_reflection_type (assembly, helper->return_type, &buf); for (i = 0; i < nargs; ++i) { MonoArray *modreqs = NULL; MonoArray *modopts = NULL; MonoReflectionType *pt; if (helper->modreqs && (i < mono_array_length (helper->modreqs))) modreqs = mono_array_get (helper->modreqs, MonoArray*, i); if (helper->modopts && (i < mono_array_length (helper->modopts))) modopts = mono_array_get (helper->modopts, MonoArray*, i); encode_custom_modifiers (assembly, modreqs, modopts, &buf); pt = mono_array_get (helper->arguments, MonoReflectionType*, i); encode_reflection_type (assembly, pt, &buf); } idx = sigbuffer_add_to_blob_cached (assembly, &buf); sigbuffer_free (&buf); return idx; }
/*
 * mono_image_get_sighelper_token:
 * Adds a StandAloneSig row for the helper's encoded signature and returns
 * the row index.
 */
static guint32 mono_image_get_sighelper_token (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper) { guint32 idx; MonoDynamicTable *table; guint32 *values; table = &assembly->tables [MONO_TABLE_STANDALONESIG]; idx = table->next_idx ++; table->rows ++; alloc_table (table, table->rows); values = table->values + idx * MONO_STAND_ALONE_SIGNATURE_SIZE; values [MONO_STAND_ALONE_SIGNATURE] = mono_reflection_encode_sighelper (assembly, helper); return idx; }
/*
 * reflection_cc_to_file:
 * Maps the low two System.Reflection calling-convention bits to the
 * on-file MONO_CALL_* value (varargs vs default).
 */
static int reflection_cc_to_file (int call_conv) { switch (call_conv & 0x3) { case 0: case 1: return MONO_CALL_DEFAULT; case 2: return MONO_CALL_VARARG; default: g_assert_not_reached (); } return 0; }
#endif /* !DISABLE_REFLECTION_EMIT */
/* Cache entry for synthesized array methods (Get/Set/Address/.ctor). */
typedef struct { MonoType *parent; MonoMethodSignature *sig; char *name; guint32 token; } ArrayMethod;
#ifndef DISABLE_REFLECTION_EMIT
/*
 * mono_image_get_array_token:
 * Returns (and caches in assembly->array_methods) a MemberRef token for an
 * array method, matching cached entries on name, parent type and signature.
 */
static guint32 mono_image_get_array_token (MonoDynamicImage *assembly, MonoReflectionArrayMethod *m) { guint32 nparams, i; GList *tmp; char *name; MonoMethodSignature *sig; ArrayMethod *am; MonoType *mtype; name = mono_string_to_utf8 (m->name); nparams = mono_array_length (m->parameters); sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + sizeof (MonoType*) * nparams); sig->hasthis = 1; sig->sentinelpos = -1; sig->call_convention = reflection_cc_to_file (m->call_conv);
sig->param_count = nparams; sig->ret = m->ret ? mono_reflection_type_get_handle (m->ret): &mono_defaults.void_class->byval_arg; mtype = mono_reflection_type_get_handle (m->parent); for (i = 0; i < nparams; ++i) sig->params [i] = mono_type_array_get_and_resolve (m->parameters, i); for (tmp = assembly->array_methods; tmp; tmp = tmp->next) { am = tmp->data; if (strcmp (name, am->name) == 0 && mono_metadata_type_equal (am->parent, mtype) && mono_metadata_signature_equal (am->sig, sig)) { g_free (name); g_free (sig); m->table_idx = am->token & 0xffffff; return am->token; } } am = g_new0 (ArrayMethod, 1); am->name = name; am->sig = sig; am->parent = mtype; am->token = mono_image_get_memberref_token (assembly, am->parent, name, method_encode_signature (assembly, sig)); assembly->array_methods = g_list_prepend (assembly->array_methods, am); m->table_idx = am->token & 0xffffff; return am->token; } /* * Insert into the metadata tables all the info about the TypeBuilder tb. * Data in the tables is inserted in a predefined order, since some tables need to be sorted. */
/*
 * mono_image_get_type_info:
 * Fills the TYPEDEF row for TypeBuilder tb and all the dependent tables
 * (ClassLayout, InterfaceImpl, Field, Method, Event, Property,
 * GenericParam, DeclSecurity, NestedClass).  The insertion order matters:
 * several of these tables must end up sorted on disk.
 */
static void mono_image_get_type_info (MonoDomain *domain, MonoReflectionTypeBuilder *tb, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint *values; int i, is_object = 0, is_system = 0; char *n; table = &assembly->tables [MONO_TABLE_TYPEDEF]; values = table->values + tb->table_idx * MONO_TYPEDEF_SIZE; values [MONO_TYPEDEF_FLAGS] = tb->attrs; n = mono_string_to_utf8 (tb->name); if (strcmp (n, "Object") == 0) is_object++; values [MONO_TYPEDEF_NAME] = string_heap_insert (&assembly->sheap, n); g_free (n); n = mono_string_to_utf8 (tb->nspace); if (strcmp (n, "System") == 0) is_system++; values [MONO_TYPEDEF_NAMESPACE] = string_heap_insert (&assembly->sheap, n); g_free (n); if (tb->parent && !(is_system && is_object) && !(tb->attrs & TYPE_ATTRIBUTE_INTERFACE)) { /* interfaces don't have a parent */ values [MONO_TYPEDEF_EXTENDS] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent)); } else { values [MONO_TYPEDEF_EXTENDS] = 0; } values [MONO_TYPEDEF_FIELD_LIST] = assembly->tables [MONO_TABLE_FIELD].next_idx; values [MONO_TYPEDEF_METHOD_LIST] = assembly->tables [MONO_TABLE_METHOD].next_idx; /* * if we have explicitlayout or sequentiallayouts, output data in the * ClassLayout table. */ if (((tb->attrs & TYPE_ATTRIBUTE_LAYOUT_MASK) != TYPE_ATTRIBUTE_AUTO_LAYOUT) && ((tb->class_size > 0) || (tb->packing_size > 0))) { table = &assembly->tables [MONO_TABLE_CLASSLAYOUT]; table->rows++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_CLASS_LAYOUT_SIZE; values [MONO_CLASS_LAYOUT_PARENT] = tb->table_idx; values [MONO_CLASS_LAYOUT_CLASS_SIZE] = tb->class_size; values [MONO_CLASS_LAYOUT_PACKING_SIZE] = tb->packing_size; } /* handle interfaces */ if (tb->interfaces) { table = &assembly->tables [MONO_TABLE_INTERFACEIMPL]; i = table->rows; table->rows += mono_array_length (tb->interfaces); alloc_table (table, table->rows); values = table->values + (i + 1) * MONO_INTERFACEIMPL_SIZE; for (i = 0; i < mono_array_length (tb->interfaces); ++i) { MonoReflectionType* iface = (MonoReflectionType*) mono_array_get (tb->interfaces, gpointer, i); values [MONO_INTERFACEIMPL_CLASS] = tb->table_idx; values [MONO_INTERFACEIMPL_INTERFACE] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (iface)); values += MONO_INTERFACEIMPL_SIZE; } } /* handle fields */ if (tb->fields) { table = &assembly->tables [MONO_TABLE_FIELD]; table->rows += tb->num_fields; alloc_table (table, table->rows); for (i = 0; i < tb->num_fields; ++i) mono_image_get_field_info ( mono_array_get (tb->fields, MonoReflectionFieldBuilder*, i), assembly); } /* handle constructors */ if (tb->ctors) { table = &assembly->tables [MONO_TABLE_METHOD]; table->rows += mono_array_length (tb->ctors); alloc_table (table, table->rows); for (i = 0; i < mono_array_length (tb->ctors); ++i) mono_image_get_ctor_info (domain, mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i), assembly); } /* handle methods */ if (tb->methods) { table = &assembly->tables [MONO_TABLE_METHOD]; table->rows += tb->num_methods; alloc_table (table, table->rows); for (i = 0; i < tb->num_methods; ++i) mono_image_get_method_info ( mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i),
assembly); } /* Do the same with properties etc.. */ if (tb->events && mono_array_length (tb->events)) { table = &assembly->tables [MONO_TABLE_EVENT]; table->rows += mono_array_length (tb->events); alloc_table (table, table->rows); table = &assembly->tables [MONO_TABLE_EVENTMAP]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_EVENT_MAP_SIZE; values [MONO_EVENT_MAP_PARENT] = tb->table_idx; values [MONO_EVENT_MAP_EVENTLIST] = assembly->tables [MONO_TABLE_EVENT].next_idx; for (i = 0; i < mono_array_length (tb->events); ++i) mono_image_get_event_info ( mono_array_get (tb->events, MonoReflectionEventBuilder*, i), assembly); } if (tb->properties && mono_array_length (tb->properties)) { table = &assembly->tables [MONO_TABLE_PROPERTY]; table->rows += mono_array_length (tb->properties); alloc_table (table, table->rows); table = &assembly->tables [MONO_TABLE_PROPERTYMAP]; table->rows ++; alloc_table (table, table->rows); values = table->values + table->rows * MONO_PROPERTY_MAP_SIZE; values [MONO_PROPERTY_MAP_PARENT] = tb->table_idx; values [MONO_PROPERTY_MAP_PROPERTY_LIST] = assembly->tables [MONO_TABLE_PROPERTY].next_idx; for (i = 0; i < mono_array_length (tb->properties); ++i) mono_image_get_property_info ( mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i), assembly); } /* handle generic parameters */ if (tb->generic_params) { table = &assembly->tables [MONO_TABLE_GENERICPARAM]; table->rows += mono_array_length (tb->generic_params); alloc_table (table, table->rows); for (i = 0; i < mono_array_length (tb->generic_params); ++i) { guint32 owner = MONO_TYPEORMETHOD_TYPE | (tb->table_idx << MONO_TYPEORMETHOD_BITS); mono_image_get_generic_param_info ( mono_array_get (tb->generic_params, MonoReflectionGenericParam*, i), owner, assembly); } } mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_TYPEDEF, tb->table_idx), tb->permissions); if (tb->subtypes) { MonoDynamicTable *ntable; ntable = &assembly->tables [MONO_TABLE_NESTEDCLASS]; ntable->rows += mono_array_length (tb->subtypes); alloc_table (ntable, ntable->rows); values = ntable->values + ntable->next_idx * MONO_NESTED_CLASS_SIZE; for (i = 0; i < mono_array_length (tb->subtypes); ++i) { MonoReflectionTypeBuilder *subtype = mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i); values [MONO_NESTED_CLASS_NESTED] = subtype->table_idx; values [MONO_NESTED_CLASS_ENCLOSING] = tb->table_idx; /*g_print ("nesting %s (%d) in %s (%d) (rows %d/%d)\n", mono_string_to_utf8 (subtype->name), subtype->table_idx, mono_string_to_utf8 (tb->name), tb->table_idx, ntable->next_idx, ntable->rows);*/ values += MONO_NESTED_CLASS_SIZE; ntable->next_idx++; } } }
#endif
/*
 * collect_types:
 * Recursively appends `type` and all of its nested subtypes to `types`.
 */
static void collect_types (MonoPtrArray *types, MonoReflectionTypeBuilder *type) { int i; mono_ptr_array_append (*types, type); if (!type->subtypes) return; for (i = 0; i < mono_array_length (type->subtypes); ++i) { MonoReflectionTypeBuilder *subtype = mono_array_get (type->subtypes, MonoReflectionTypeBuilder*, i); collect_types (types, subtype); } }
/*
 * compare_types_by_table_idx:
 * qsort-style comparator ordering TypeBuilders by their TYPEDEF row index.
 */
static gint compare_types_by_table_idx (MonoReflectionTypeBuilder **type1, MonoReflectionTypeBuilder **type2) { if ((*type1)->table_idx < (*type2)->table_idx) return -1; else if ((*type1)->table_idx > (*type2)->table_idx) return 1; else return 0; }
/*
 * params_add_cattrs:
 * Emits custom attributes for each non-NULL ParameterBuilder in pinfo.
 */
static void params_add_cattrs (MonoDynamicImage *assembly, MonoArray *pinfo) { int i; if (!pinfo) return; for (i = 0; i < mono_array_length (pinfo); ++i) { MonoReflectionParamBuilder *pb; pb = mono_array_get (pinfo, MonoReflectionParamBuilder *, i); if (!pb) continue; mono_image_add_cattrs (assembly, pb->table_idx, MONO_CUSTOM_ATTR_PARAMDEF, pb->cattrs); } }
/*
 * type_add_cattrs:
 * Recursively emits custom attributes for a TypeBuilder, its fields,
 * events, properties, ctors, methods (and their parameters) and subtypes.
 */
static void type_add_cattrs (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb) { int i; mono_image_add_cattrs (assembly, tb->table_idx, MONO_CUSTOM_ATTR_TYPEDEF, tb->cattrs); if (tb->fields) { for (i = 0; i < tb->num_fields; ++i) { MonoReflectionFieldBuilder* fb; fb = mono_array_get (tb->fields, MonoReflectionFieldBuilder*, i); mono_image_add_cattrs (assembly, fb->table_idx, MONO_CUSTOM_ATTR_FIELDDEF, fb->cattrs); } } if (tb->events) { for (i = 0; i < mono_array_length (tb->events); ++i) { MonoReflectionEventBuilder* eb; eb = mono_array_get (tb->events, MonoReflectionEventBuilder*, i); mono_image_add_cattrs (assembly, eb->table_idx, MONO_CUSTOM_ATTR_EVENT, eb->cattrs); } } if (tb->properties) { for (i = 0; i < mono_array_length (tb->properties); ++i) { MonoReflectionPropertyBuilder* pb; pb = mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i); mono_image_add_cattrs (assembly, pb->table_idx, MONO_CUSTOM_ATTR_PROPERTY, pb->cattrs); } } if (tb->ctors) { for (i = 0; i < mono_array_length (tb->ctors); ++i) { MonoReflectionCtorBuilder* cb; cb = mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i); mono_image_add_cattrs (assembly, cb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, cb->cattrs); params_add_cattrs (assembly, cb->pinfo); } } if (tb->methods) { for (i = 0; i < tb->num_methods; ++i) { MonoReflectionMethodBuilder* mb; mb = mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i); mono_image_add_cattrs (assembly, mb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, mb->cattrs); params_add_cattrs (assembly, mb->pinfo); } } if (tb->subtypes) { for (i = 0; i < mono_array_length (tb->subtypes); ++i) type_add_cattrs (assembly, mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i)); } }
/*
 * module_add_cattrs:
 * Emits custom attributes for a ModuleBuilder, its global methods (with
 * parameters), global fields, and every type it contains.
 */
static void module_add_cattrs (MonoDynamicImage *assembly, MonoReflectionModuleBuilder *moduleb) { int i; mono_image_add_cattrs (assembly, moduleb->table_idx, MONO_CUSTOM_ATTR_MODULE, moduleb->cattrs); if (moduleb->global_methods) { for (i = 0; i < mono_array_length (moduleb->global_methods); ++i) { MonoReflectionMethodBuilder* mb = mono_array_get (moduleb->global_methods, MonoReflectionMethodBuilder*, i); mono_image_add_cattrs (assembly, mb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, mb->cattrs); params_add_cattrs (assembly, mb->pinfo); } } if
(moduleb->global_fields) { for (i = 0; i < mono_array_length (moduleb->global_fields); ++i) { MonoReflectionFieldBuilder *fb = mono_array_get (moduleb->global_fields, MonoReflectionFieldBuilder*, i); mono_image_add_cattrs (assembly, fb->table_idx, MONO_CUSTOM_ATTR_FIELDDEF, fb->cattrs); } } if (moduleb->types) { for (i = 0; i < moduleb->num_types; ++i) type_add_cattrs (assembly, mono_array_get (moduleb->types, MonoReflectionTypeBuilder*, i)); } }
/*
 * mono_image_fill_file_table:
 * Adds a FILE row for `module`, including its hash (a fixed 20-byte SHA1
 * digest computed from the file on disk) in the blob heap.
 */
static void mono_image_fill_file_table (MonoDomain *domain, MonoReflectionModule *module, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; char blob_size [6]; guchar hash [20]; char *b = blob_size; char *dir, *path; table = &assembly->tables [MONO_TABLE_FILE]; table->rows++; alloc_table (table, table->rows); values = table->values + table->next_idx * MONO_FILE_SIZE; values [MONO_FILE_FLAGS] = FILE_CONTAINS_METADATA; values [MONO_FILE_NAME] = string_heap_insert (&assembly->sheap, module->image->module_name); if (module->image->dynamic) { /* This depends on the fact that the main module is emitted last */ dir = mono_string_to_utf8 (((MonoReflectionModuleBuilder*)module)->assemblyb->dir); path = g_strdup_printf ("%s%c%s", dir, G_DIR_SEPARATOR, module->image->module_name); } else { dir = NULL; path = g_strdup (module->image->name); } mono_sha1_get_digest_from_file (path, hash); g_free (dir); g_free (path); mono_metadata_encode_value (20, b, &b); values [MONO_FILE_HASH_VALUE] = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size); mono_image_add_stream_data (&assembly->blob, (char*)hash, 20); table->next_idx ++; }
/*
 * mono_image_fill_module_table:
 * Fills the MODULE row: module name plus a fresh GUID heap entry whose
 * 1-based index is used as the MVID; Generation/Enc/EncBase are zeroed.
 */
static void mono_image_fill_module_table (MonoDomain *domain, MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly) { MonoDynamicTable *table; int i; table = &assembly->tables [MONO_TABLE_MODULE]; mb->table_idx = table->next_idx ++; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->module.name); i = mono_image_add_stream_data (&assembly->guid, mono_array_addr (mb->guid, char, 0), 16); i /= 16; ++i; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_GENERATION] = 0; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_MVID] = i; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_ENC] = 0; table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_ENCBASE] = 0; }
/*
 * mono_image_fill_export_table_from_class:
 * Adds an EXPORTEDTYPE row for a public (or nested-public) class and
 * recurses into its nested classes; returns the new row index, or 0 when
 * the class is not visible enough to be exported.
 */
static guint32 mono_image_fill_export_table_from_class (MonoDomain *domain, MonoClass *klass, guint32 module_index, guint32 parent_index, MonoDynamicImage *assembly) { MonoDynamicTable *table; guint32 *values; guint32 visib, res; visib = klass->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK; if (! ((visib & TYPE_ATTRIBUTE_PUBLIC) || (visib & TYPE_ATTRIBUTE_NESTED_PUBLIC))) return 0; table = &assembly->tables [MONO_TABLE_EXPORTEDTYPE]; table->rows++; alloc_table (table, table->rows); values = table->values + table->next_idx * MONO_EXP_TYPE_SIZE; values [MONO_EXP_TYPE_FLAGS] = klass->flags; values [MONO_EXP_TYPE_TYPEDEF] = klass->type_token; if (klass->nested_in) values [MONO_EXP_TYPE_IMPLEMENTATION] = (parent_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_EXP_TYPE; else values [MONO_EXP_TYPE_IMPLEMENTATION] = (module_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_FILE; values [MONO_EXP_TYPE_NAME] = string_heap_insert (&assembly->sheap, klass->name); values [MONO_EXP_TYPE_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space); res = table->next_idx; table->next_idx ++; /* Emit nested types */ if (klass->ext && klass->ext->nested_classes) { GList *tmp; for (tmp = klass->ext->nested_classes; tmp; tmp = tmp->next) mono_image_fill_export_table_from_class (domain, tmp->data, module_index, table->next_idx - 1, assembly); } return res; }
/*
 * mono_image_fill_export_table:
 * TypeBuilder counterpart of the function above; walks tb->subtypes
 * manually because klass->nested_classes is not populated for SRE types.
 */
static void mono_image_fill_export_table (MonoDomain *domain, MonoReflectionTypeBuilder *tb, guint32 module_index, guint32 parent_index, MonoDynamicImage *assembly) { MonoClass *klass; guint32 idx, i; klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb)); klass->type_token = mono_metadata_make_token (MONO_TABLE_TYPEDEF, tb->table_idx); idx = mono_image_fill_export_table_from_class (domain, klass, module_index, parent_index, assembly); /* * Emit nested types * We need to do this ourselves since klass->nested_classes is not set up. */ if (tb->subtypes) { for (i = 0; i < mono_array_length (tb->subtypes); ++i) mono_image_fill_export_table (domain, mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i), module_index, idx, assembly); } }
/*
 * mono_image_fill_export_table_from_module:
 * Exports every public TYPEDEF of an existing (non-dynamic) module.
 */
static void mono_image_fill_export_table_from_module (MonoDomain *domain, MonoReflectionModule *module, guint32 module_index, MonoDynamicImage *assembly) { MonoImage *image = module->image; MonoTableInfo *t; guint32 i; t = &image->tables [MONO_TABLE_TYPEDEF]; for (i = 0; i < t->rows; ++i) { MonoClass *klass = mono_class_get (image, mono_metadata_make_token (MONO_TABLE_TYPEDEF, i + 1)); if (klass->flags & TYPE_ATTRIBUTE_PUBLIC) mono_image_fill_export_table_from_class (domain, klass, module_index, 0, assembly); } }
/*
 * add_exported_type:
 * Adds an EXPORTEDTYPE row for a type forwarder (or a nested member of
 * one) and recurses into its nested types.  Top-level entries reference
 * the owning AssemblyRef; nested entries reference their enclosing row.
 */
static void add_exported_type (MonoReflectionAssemblyBuilder *assemblyb, MonoDynamicImage *assembly, MonoClass *klass, guint32 parent_index) { MonoDynamicTable *table; guint32 *values; guint32 scope, scope_idx, impl, current_idx; gboolean forwarder = TRUE; gpointer iter = NULL; MonoClass *nested; if (klass->nested_in) { impl = (parent_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_EXP_TYPE; forwarder = FALSE; } else { scope = resolution_scope_from_image (assembly, klass->image); g_assert ((scope & MONO_RESOLTION_SCOPE_MASK) == MONO_RESOLTION_SCOPE_ASSEMBLYREF); scope_idx = scope >> MONO_RESOLTION_SCOPE_BITS; impl = (scope_idx << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_ASSEMBLYREF; } table = &assembly->tables [MONO_TABLE_EXPORTEDTYPE]; table->rows++; alloc_table (table, table->rows); current_idx = table->next_idx; values = table->values + current_idx * MONO_EXP_TYPE_SIZE;
values [MONO_EXP_TYPE_FLAGS] = forwarder ? TYPE_ATTRIBUTE_FORWARDER : 0; values [MONO_EXP_TYPE_TYPEDEF] = 0; values [MONO_EXP_TYPE_IMPLEMENTATION] = impl; values [MONO_EXP_TYPE_NAME] = string_heap_insert (&assembly->sheap, klass->name); values [MONO_EXP_TYPE_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space); table->next_idx++; while ((nested = mono_class_get_nested_types (klass, &iter))) add_exported_type (assemblyb, assembly, nested, current_idx); } static void mono_image_fill_export_table_from_type_forwarders (MonoReflectionAssemblyBuilder *assemblyb, MonoDynamicImage *assembly) { MonoClass *klass; int i; if (!assemblyb->type_forwarders) return; for (i = 0; i < mono_array_length (assemblyb->type_forwarders); ++i) { MonoReflectionType *t = mono_array_get (assemblyb->type_forwarders, MonoReflectionType *, i); MonoType *type; if (!t) continue; type = mono_reflection_type_get_handle (t); g_assert (type); klass = mono_class_from_mono_type (type); add_exported_type (assemblyb, assembly, klass, 0); } } #define align_pointer(base,p)\ do {\ guint32 __diff = (unsigned char*)(p)-(unsigned char*)(base);\ if (__diff & 3)\ (p) += 4 - (__diff & 3);\ } while (0) static int compare_constants (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; return a_values [MONO_CONSTANT_PARENT] - b_values [MONO_CONSTANT_PARENT]; } static int compare_semantics (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; int assoc = a_values [MONO_METHOD_SEMA_ASSOCIATION] - b_values [MONO_METHOD_SEMA_ASSOCIATION]; if (assoc) return assoc; return a_values [MONO_METHOD_SEMA_SEMANTICS] - b_values [MONO_METHOD_SEMA_SEMANTICS]; } static int compare_custom_attrs (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; return a_values [MONO_CUSTOM_ATTR_PARENT] - b_values [MONO_CUSTOM_ATTR_PARENT]; } static int compare_field_marshal (const void *a, const void *b) { const guint32 
*a_values = a; const guint32 *b_values = b; return a_values [MONO_FIELD_MARSHAL_PARENT] - b_values [MONO_FIELD_MARSHAL_PARENT]; } static int compare_nested (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; return a_values [MONO_NESTED_CLASS_NESTED] - b_values [MONO_NESTED_CLASS_NESTED]; } static int compare_genericparam (const void *a, const void *b) { const GenericParamTableEntry **a_entry = (const GenericParamTableEntry **) a; const GenericParamTableEntry **b_entry = (const GenericParamTableEntry **) b; if ((*b_entry)->owner == (*a_entry)->owner) return mono_type_get_generic_param_num (mono_reflection_type_get_handle ((MonoReflectionType*)(*a_entry)->gparam)) - mono_type_get_generic_param_num (mono_reflection_type_get_handle ((MonoReflectionType*)(*b_entry)->gparam)); else return (*a_entry)->owner - (*b_entry)->owner; } static int compare_declsecurity_attrs (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; return a_values [MONO_DECL_SECURITY_PARENT] - b_values [MONO_DECL_SECURITY_PARENT]; } static int compare_interface_impl (const void *a, const void *b) { const guint32 *a_values = a; const guint32 *b_values = b; int klass = a_values [MONO_INTERFACEIMPL_CLASS] - b_values [MONO_INTERFACEIMPL_CLASS]; if (klass) return klass; return a_values [MONO_INTERFACEIMPL_INTERFACE] - b_values [MONO_INTERFACEIMPL_INTERFACE]; } static void pad_heap (MonoDynamicStream *sh) { if (sh->index & 3) { int sz = 4 - (sh->index & 3); memset (sh->data + sh->index, 0, sz); sh->index += sz; } } struct StreamDesc { const char *name; MonoDynamicStream *stream; }; /* * build_compressed_metadata() fills in the blob of data that represents the * raw metadata as it will be saved in the PE file. The five streams are output * and the metadata tables are comnpressed from the guint32 array representation, * to the compressed on-disk format. 
 */
static void
build_compressed_metadata (MonoDynamicImage *assembly)
{
	MonoDynamicTable *table;
	int i;
	guint64 valid_mask = 0;		/* bit i set => table i has rows */
	guint64 sorted_mask;		/* bit i set => table i is emitted sorted */
	guint32 heapt_size = 0;		/* total size of the #~ (table) stream */
	guint32 meta_size = 256;	/* allow for header and other stuff */
	guint32 table_offset;
	guint32 ntables = 0;
	guint64 *int64val;
	guint32 *int32val;
	guint16 *int16val;
	MonoImage *meta;
	unsigned char *p;
	struct StreamDesc stream_desc [5];

	/* GENERICPARAM rows must be emitted sorted by owner, so the rows are
	 * collected first and only written here after sorting */
	qsort (assembly->gen_params->pdata, assembly->gen_params->len, sizeof (gpointer), compare_genericparam);
	for (i = 0; i < assembly->gen_params->len; i++){
		GenericParamTableEntry *entry = g_ptr_array_index (assembly->gen_params, i);
		write_generic_param_entry (assembly, entry);
	}

	/* the five physical metadata streams, in on-disk header order */
	stream_desc [0].name  = "#~";
	stream_desc [0].stream = &assembly->tstream;
	stream_desc [1].name  = "#Strings";
	stream_desc [1].stream = &assembly->sheap;
	stream_desc [2].name  = "#US";
	stream_desc [2].stream = &assembly->us;
	stream_desc [3].name  = "#Blob";
	stream_desc [3].stream = &assembly->blob;
	stream_desc [4].name  = "#GUID";
	stream_desc [4].stream = &assembly->guid;

	/* tables that are sorted */
	sorted_mask = ((guint64)1 << MONO_TABLE_CONSTANT) | ((guint64)1 << MONO_TABLE_FIELDMARSHAL)
		| ((guint64)1 << MONO_TABLE_METHODSEMANTICS) | ((guint64)1 << MONO_TABLE_CLASSLAYOUT)
		| ((guint64)1 << MONO_TABLE_FIELDLAYOUT) | ((guint64)1 << MONO_TABLE_FIELDRVA)
		| ((guint64)1 << MONO_TABLE_IMPLMAP) | ((guint64)1 << MONO_TABLE_NESTEDCLASS)
		| ((guint64)1 << MONO_TABLE_METHODIMPL) | ((guint64)1 << MONO_TABLE_CUSTOMATTRIBUTE)
		| ((guint64)1 << MONO_TABLE_DECLSECURITY) | ((guint64)1 << MONO_TABLE_GENERICPARAM)
		| ((guint64)1 << MONO_TABLE_INTERFACEIMPL);

	/* Compute table sizes */
	/* the MonoImage has already been created in mono_image_basic_init() */
	meta = &assembly->image;

	/* sizes should be multiple of 4 */
	pad_heap (&assembly->blob);
	pad_heap (&assembly->guid);
	pad_heap (&assembly->sheap);
	pad_heap (&assembly->us);

	/* Setup the info used by compute_sizes () */
	/* an index into a heap needs 4 bytes (instead of 2) once the heap exceeds 64 KB */
	meta->idx_blob_wide = assembly->blob.index >= 65536 ? 1 : 0;
	meta->idx_guid_wide = assembly->guid.index >= 65536 ? 1 : 0;
	meta->idx_string_wide = assembly->sheap.index >= 65536 ? 1 : 0;

	meta_size += assembly->blob.index;
	meta_size += assembly->guid.index;
	meta_size += assembly->sheap.index;
	meta_size += assembly->us.index;

	for (i=0; i < MONO_TABLE_NUM; ++i)
		meta->tables [i].rows = assembly->tables [i].rows;
	
	/* compute each present table's compressed row size and sum them */
	for (i = 0; i < MONO_TABLE_NUM; i++){
		if (meta->tables [i].rows == 0)
			continue;
		valid_mask |= (guint64)1 << i;
		ntables ++;
		meta->tables [i].row_size = mono_metadata_compute_size (
			meta, i, &meta->tables [i].size_bitfield);
		heapt_size += meta->tables [i].row_size * meta->tables [i].rows;
	}
	heapt_size += 24; /* #~ header size */
	heapt_size += ntables * 4; /* make multiple of 4 */
	heapt_size += 3;
	heapt_size &= ~3;
	meta_size += heapt_size;
	meta->raw_metadata = g_malloc0 (meta_size);
	p = (unsigned char*)meta->raw_metadata;

	/* the metadata signature */
	*p++ = 'B'; *p++ = 'S'; *p++ = 'J'; *p++ = 'B';
	/* version numbers and 4 bytes reserved */
	int16val = (guint16*)p;
	*int16val++ = GUINT16_TO_LE (meta->md_version_major);
	*int16val = GUINT16_TO_LE (meta->md_version_minor);
	p += 8;
	/* version string */
	int32val = (guint32*)p;
	*int32val = GUINT32_TO_LE ((strlen (meta->version) + 3) & (~3)); /* needs to be multiple of 4 */
	p += 4;
	memcpy (p, meta->version, strlen (meta->version));
	p += GUINT32_FROM_LE (*int32val);
	align_pointer (meta->raw_metadata, p);
	int16val = (guint16*)p;
	*int16val++ = GUINT16_TO_LE (0); /* flags must be 0 */
	*int16val = GUINT16_TO_LE (5); /* number of streams */
	p += 4;

	/*
	 * write the stream info.
	 */
	table_offset = (p - (unsigned char*)meta->raw_metadata) + 5 * 8 + 40; /* room needed for stream headers */
	table_offset += 3; table_offset &= ~3;

	assembly->tstream.index = heapt_size;
	/* each stream header is offset + size + NUL-padded name, 4-aligned */
	for (i = 0; i < 5; ++i) {
		int32val = (guint32*)p;
		stream_desc [i].stream->offset = table_offset;
		*int32val++ = GUINT32_TO_LE (table_offset);
		*int32val = GUINT32_TO_LE (stream_desc [i].stream->index);
		table_offset += GUINT32_FROM_LE (*int32val);
		table_offset += 3; table_offset &= ~3;
		p += 8;
		strcpy ((char*)p, stream_desc [i].name);
		p += strlen (stream_desc [i].name) + 1;
		align_pointer (meta->raw_metadata, p);
	}

	/* 
	 * now copy the data, the table stream header and contents goes first.
	 */
	g_assert ((p - (unsigned char*)meta->raw_metadata) < assembly->tstream.offset);
	p = (guchar*)meta->raw_metadata + assembly->tstream.offset;
	int32val = (guint32*)p;
	*int32val = GUINT32_TO_LE (0); /* reserved */
	p += 4;

	*p++ = 2; /* version */
	*p++ = 0;

	/* heap-size flags byte: which heap indexes are 4 bytes wide */
	if (meta->idx_string_wide)
		*p |= 0x01;
	if (meta->idx_guid_wide)
		*p |= 0x02;
	if (meta->idx_blob_wide)
		*p |= 0x04;
	++p;
	*p++ = 1; /* reserved */
	int64val = (guint64*)p;
	*int64val++ = GUINT64_TO_LE (valid_mask);
	*int64val++ = GUINT64_TO_LE (valid_mask & sorted_mask); /* bitvector of sorted tables */
	p += 16;
	int32val = (guint32*)p;
	/* row counts, one per present table, in table order */
	for (i = 0; i < MONO_TABLE_NUM; i++){
		if (meta->tables [i].rows == 0)
			continue;
		*int32val++ = GUINT32_TO_LE (meta->tables [i].rows);
	}
	p = (unsigned char*)int32val;

	/* sort the tables that still need sorting */
	/* table->values + SIZE skips the unused row 0 of each table */
	table = &assembly->tables [MONO_TABLE_CONSTANT];
	if (table->rows)
		qsort (table->values + MONO_CONSTANT_SIZE, table->rows, sizeof (guint32) * MONO_CONSTANT_SIZE, compare_constants);
	table = &assembly->tables [MONO_TABLE_METHODSEMANTICS];
	if (table->rows)
		qsort (table->values + MONO_METHOD_SEMA_SIZE, table->rows, sizeof (guint32) * MONO_METHOD_SEMA_SIZE, compare_semantics);
	table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];
	if (table->rows)
		qsort (table->values + MONO_CUSTOM_ATTR_SIZE, table->rows, sizeof (guint32) * MONO_CUSTOM_ATTR_SIZE, compare_custom_attrs);
	table = &assembly->tables [MONO_TABLE_FIELDMARSHAL];
	if (table->rows)
		qsort (table->values + MONO_FIELD_MARSHAL_SIZE, table->rows, sizeof (guint32) * MONO_FIELD_MARSHAL_SIZE, compare_field_marshal);
	table = &assembly->tables [MONO_TABLE_NESTEDCLASS];
	if (table->rows)
		qsort (table->values + MONO_NESTED_CLASS_SIZE, table->rows, sizeof (guint32) * MONO_NESTED_CLASS_SIZE, compare_nested);
	/* Section 21.11 DeclSecurity in Partition II doesn't specify this to be sorted by MS implementation requires it */
	table = &assembly->tables [MONO_TABLE_DECLSECURITY];
	if (table->rows)
		qsort (table->values + MONO_DECL_SECURITY_SIZE, table->rows, sizeof (guint32) * MONO_DECL_SECURITY_SIZE, compare_declsecurity_attrs);
	table = &assembly->tables [MONO_TABLE_INTERFACEIMPL];
	if (table->rows)
		qsort (table->values + MONO_INTERFACEIMPL_SIZE, table->rows, sizeof (guint32) * MONO_INTERFACEIMPL_SIZE, compare_interface_impl);

	/* compress the tables */
	/* each guint32 column is narrowed to 1, 2 or 4 little-endian bytes as
	 * dictated by the table's size bitfield */
	for (i = 0; i < MONO_TABLE_NUM; i++){
		int row, col;
		guint32 *values;
		guint32 bitfield = meta->tables [i].size_bitfield;
		if (!meta->tables [i].rows)
			continue;
		if (assembly->tables [i].columns != mono_metadata_table_count (bitfield))
			g_error ("col count mismatch in %d: %d %d", i, assembly->tables [i].columns, mono_metadata_table_count (bitfield));
		meta->tables [i].base = (char*)p;
		for (row = 1; row <= meta->tables [i].rows; ++row) {
			values = assembly->tables [i].values + row * assembly->tables [i].columns;
			for (col = 0; col < assembly->tables [i].columns; ++col) {
				switch (mono_metadata_table_size (bitfield, col)) {
				case 1:
					*p++ = values [col];
					break;
				case 2:
					*p++ = values [col] & 0xff;
					*p++ = (values [col] >> 8) & 0xff;
					break;
				case 4:
					*p++ = values [col] & 0xff;
					*p++ = (values [col] >> 8) & 0xff;
					*p++ = (values [col] >> 16) & 0xff;
					*p++ = (values [col] >> 24) & 0xff;
					break;
				default:
					g_assert_not_reached ();
				}
			}
		}
		g_assert ((p - (const unsigned char*)meta->tables [i].base) == (meta->tables [i].rows * meta->tables [i].row_size));
	}
	
	/* #GUID is laid out last, so this bounds-checks the whole blob */
	g_assert (assembly->guid.offset + assembly->guid.index < meta_size);
	memcpy (meta->raw_metadata + assembly->sheap.offset, assembly->sheap.data, assembly->sheap.index);
	memcpy (meta->raw_metadata + assembly->us.offset, assembly->us.data, assembly->us.index);
	memcpy (meta->raw_metadata + assembly->blob.offset, assembly->blob.data, assembly->blob.index);
	memcpy (meta->raw_metadata + assembly->guid.offset, assembly->guid.data, assembly->guid.index);

	assembly->meta_size = assembly->guid.offset + assembly->guid.index;
}

/*
 * Some tables in metadata need to be sorted according to some criteria, but
 * when methods and fields are first created with reflection, they may be assigned a token
 * that doesn't correspond to the final token they will get assigned after the sorting.
 * ILGenerator.cs keeps a fixup table that maps the position of tokens in the IL code stream
 * with the reflection objects that represent them. Once all the tables are set up, the
 * reflection objects will contains the correct table index. fixup_method() will fixup the
 * tokens for the method with ILGenerator @ilgen.
 */
static void
fixup_method (MonoReflectionILGen *ilgen, gpointer value, MonoDynamicImage *assembly)
{
	guint32 code_idx = GPOINTER_TO_UINT (value);
	MonoReflectionILTokenInfo *iltoken;
	MonoReflectionFieldBuilder *field;
	MonoReflectionCtorBuilder *ctor;
	MonoReflectionMethodBuilder *method;
	MonoReflectionTypeBuilder *tb;
	MonoReflectionArrayMethod *am;
	guint32 i, idx = 0;
	unsigned char *target;

	/* patch each recorded token position in the method's IL with the final
	 * table index of the member it refers to; tokens are stored little
	 * endian, so target [3] is the table id and target [0..2] the index */
	for (i = 0; i < ilgen->num_token_fixups; ++i) {
		iltoken = (MonoReflectionILTokenInfo *)mono_array_addr_with_size (ilgen->token_fixups, sizeof (MonoReflectionILTokenInfo), i);
		target = (guchar*)assembly->code.data + code_idx + iltoken->code_pos;
		/* the dispatch below is keyed on the runtime class name of the
		 * reflection object recorded in the fixup entry; entries whose
		 * token does not change (e.g. members on generic instances) are
		 * skipped with "continue" before the patch at the bottom */
		switch (target [3]) {
		case MONO_TABLE_FIELD:
			if (!strcmp (iltoken->member->vtable->klass->name, "FieldBuilder")) {
				field = (MonoReflectionFieldBuilder *)iltoken->member;
				idx = field->table_idx;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoField")) {
				MonoClassField *f = ((MonoReflectionField*)iltoken->member)->field;
				idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->field_to_table_idx, f));
			} else {
				g_assert_not_reached ();
			}
			break;
		case MONO_TABLE_METHOD:
			if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder")) {
				method = (MonoReflectionMethodBuilder *)iltoken->member;
				idx = method->table_idx;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "ConstructorBuilder")) {
				ctor = (MonoReflectionCtorBuilder *)iltoken->member;
				idx = ctor->table_idx;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoMethod") || 
					   !strcmp (iltoken->member->vtable->klass->name, "MonoCMethod")) {
				MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method;
				idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, m));
			} else {
				g_assert_not_reached ();
			}
			break;
		case MONO_TABLE_TYPEDEF:
			if (strcmp (iltoken->member->vtable->klass->name, "TypeBuilder"))
				g_assert_not_reached ();
			tb = (MonoReflectionTypeBuilder *)iltoken->member;
			idx = tb->table_idx;
			break;
		case MONO_TABLE_MEMBERREF:
			if (!strcmp (iltoken->member->vtable->klass->name, "MonoArrayMethod")) {
				am = (MonoReflectionArrayMethod*)iltoken->member;
				idx = am->table_idx;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoMethod") ||
				   !strcmp (iltoken->member->vtable->klass->name, "MonoCMethod") ||
				   !strcmp (iltoken->member->vtable->klass->name, "MonoGenericMethod") ||
				   !strcmp (iltoken->member->vtable->klass->name, "MonoGenericCMethod")) {
				MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method;
				g_assert (m->klass->generic_class || m->klass->generic_container);
				continue;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "FieldBuilder")) {
				continue;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoField")) {
				MonoClassField *f = ((MonoReflectionField*)iltoken->member)->field;
				g_assert (is_field_on_inst (f));
				continue;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder") ||
					!strcmp (iltoken->member->vtable->klass->name, "ConstructorBuilder")) {
				continue;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "FieldOnTypeBuilderInst")) {
				continue;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodOnTypeBuilderInst")) {
				continue;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "ConstructorOnTypeBuilderInst")) {
				continue;
			} else {
				g_assert_not_reached ();
			}
			break;
		case MONO_TABLE_METHODSPEC:
			if (!strcmp (iltoken->member->vtable->klass->name, "MonoGenericMethod")) {
				MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method;
				g_assert (mono_method_signature (m)->generic_param_count);
				continue;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder")) {
				continue;
			} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodOnTypeBuilderInst")) {
				continue;
			} else {
				g_assert_not_reached ();
			}
			break;
		default:
			g_error ("got unexpected table 0x%02x in fixup", target [3]);
		}
		/* write the 24-bit index back into the token, little endian */
		target [0] = idx & 0xff;
		target [1] = (idx >> 8) & 0xff;
		target [2] = (idx >> 16) & 0xff;
	}
}

/*
 * fixup_cattrs:
 *
 * The
 CUSTOM_ATTRIBUTE table might contain METHODDEF tokens whose final
 * value is not known when the table is emitted.
 */
static void
fixup_cattrs (MonoDynamicImage *assembly)
{
	MonoDynamicTable *table;
	guint32 *values;
	guint32 type, i, idx, token;
	MonoObject *ctor;

	table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];

	/* rewrite each METHODDEF-coded Type column with the ctor's final
	 * METHOD table index, looked up through the token hash */
	for (i = 0; i < table->rows; ++i) {
		values = table->values + ((i + 1) * MONO_CUSTOM_ATTR_SIZE);
		type = values [MONO_CUSTOM_ATTR_TYPE];
		if ((type & MONO_CUSTOM_ATTR_TYPE_MASK) == MONO_CUSTOM_ATTR_TYPE_METHODDEF) {
			idx = type >> MONO_CUSTOM_ATTR_TYPE_BITS;
			token = mono_metadata_make_token (MONO_TABLE_METHOD, idx);
			ctor = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
			g_assert (ctor);

			if (!strcmp (ctor->vtable->klass->name, "MonoCMethod")) {
				MonoMethod *m = ((MonoReflectionMethod*)ctor)->method;
				idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, m));
				values [MONO_CUSTOM_ATTR_TYPE] = (idx << MONO_CUSTOM_ATTR_TYPE_BITS) | MONO_CUSTOM_ATTR_TYPE_METHODDEF;
			}
		}
	}
}

/* Append one MANIFESTRESOURCE row for @rsrc with the given coded
 * Implementation index (0 = resource lives in this module). */
static void
assembly_add_resource_manifest (MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly, MonoReflectionResource *rsrc, guint32 implementation)
{
	MonoDynamicTable *table;
	guint32 *values;

	table = &assembly->tables [MONO_TABLE_MANIFESTRESOURCE];
	table->rows++;
	alloc_table (table, table->rows);
	values = table->values + table->next_idx * MONO_MANIFEST_SIZE;
	values [MONO_MANIFEST_OFFSET] = rsrc->offset;
	values [MONO_MANIFEST_FLAGS] = rsrc->attrs;
	values [MONO_MANIFEST_NAME] = string_heap_insert_mstring (&assembly->sheap, rsrc->name);
	values [MONO_MANIFEST_IMPLEMENTATION] = implementation;
	table->next_idx++;
}

/*
 * Emit @rsrc: a linked resource file gets a FILE row (with SHA1 hash) and a
 * file-coded Implementation; embedded data is appended to the resource
 * stream, length-prefixed with a 4-byte little-endian size.
 */
static void
assembly_add_resource (MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly, MonoReflectionResource *rsrc)
{
	MonoDynamicTable *table;
	guint32 *values;
	char blob_size [6];
	guchar hash [20];
	char *b = blob_size;
	char *name, *sname;
	guint32 idx, offset;

	if (rsrc->filename) {
		name = mono_string_to_utf8 (rsrc->filename);
		sname = g_path_get_basename (name);
	
		table = &assembly->tables [MONO_TABLE_FILE];
		table->rows++;
		alloc_table (table, table->rows);
		values = table->values + table->next_idx * MONO_FILE_SIZE;
		values [MONO_FILE_FLAGS] = FILE_CONTAINS_NO_METADATA;
		values [MONO_FILE_NAME] = string_heap_insert (&assembly->sheap, sname);
		g_free (sname);

		/* hash value is a 20-byte SHA1 digest stored as a blob */
		mono_sha1_get_digest_from_file (name, hash);
		mono_metadata_encode_value (20, b, &b);
		values [MONO_FILE_HASH_VALUE] = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size);
		mono_image_add_stream_data (&assembly->blob, (char*)hash, 20);
		g_free (name);
		idx = table->next_idx++;
		rsrc->offset = 0;
		idx = MONO_IMPLEMENTATION_FILE | (idx << MONO_IMPLEMENTATION_BITS);
	} else {
		char sizebuf [4];
		char *data;
		guint len;
		if (rsrc->data) {
			data = mono_array_addr (rsrc->data, char, 0);
			len = mono_array_length (rsrc->data);
		} else {
			data = NULL;
			len = 0;
		}
		offset = len;
		sizebuf [0] = offset; sizebuf [1] = offset >> 8;
		sizebuf [2] = offset >> 16; sizebuf [3] = offset >> 24;
		rsrc->offset = mono_image_add_stream_data (&assembly->resources, sizebuf, 4);
		mono_image_add_stream_data (&assembly->resources, data, len);

		if (!mb->is_main)
			/* 
			 * The entry should be emitted into the MANIFESTRESOURCE table of 
			 * the main module, but that needs to reference the FILE table
			 * which isn't emitted yet.
			 */
			return;
		else
			idx = 0;
	}

	assembly_add_resource_manifest (mb, assembly, rsrc, idx);
}

/*
 * Parse a "major.minor.build.revision" version string into the four
 * ASSEMBLY version columns; missing components default to 0.
 */
static void
set_version_from_string (MonoString *version, guint32 *values)
{
	gchar *ver, *p, *str;
	guint32 i;
	
	values [MONO_ASSEMBLY_MAJOR_VERSION] = 0;
	values [MONO_ASSEMBLY_MINOR_VERSION] = 0;
	values [MONO_ASSEMBLY_REV_NUMBER] = 0;
	values [MONO_ASSEMBLY_BUILD_NUMBER] = 0;
	if (!version)
		return;
	ver = str = mono_string_to_utf8 (version);
	for (i = 0; i < 4; ++i) {
		values [MONO_ASSEMBLY_MAJOR_VERSION + i] = strtol (ver, &p, 10);
		switch (*p) {
		case '.':
			p++;
			break;
		case '*':
			/* handle Revision and Build */
			/* NOTE(review): a '*' component parses as 0 and is just
			 * skipped here — confirm that matches the managed side */
			p++;
			break;
		}
		ver = p;
	}
	g_free (str);
}

/*
 * Store @pkey in the #Blob heap and remember it on @assembly, reserving
 * space for the strong name signature.  Returns the blob index of the key
 * (0 when there is no key).
 */
static guint32
load_public_key (MonoArray *pkey, MonoDynamicImage *assembly) {
	gsize len;
	guint32 token = 0;
	char blob_size [6];
	char *b = blob_size;

	if (!pkey)
		return token;

	len = mono_array_length (pkey);
	mono_metadata_encode_value (len, b, &b);
	token = mono_image_add_stream_data (&assembly->blob, blob_size, b - blob_size);
	mono_image_add_stream_data (&assembly->blob, mono_array_addr (pkey, char, 0), len);

	assembly->public_key = g_malloc (len);
	memcpy (assembly->public_key, mono_array_addr (pkey, char, 0), len);
	assembly->public_key_len = len;

	/* Special case: check for ECMA key (16 bytes) */
	if ((len == MONO_ECMA_KEY_LENGTH) && mono_is_ecma_key (mono_array_addr (pkey, char, 0), len)) {
		/* In this case we must reserve 128 bytes (1024 bits) for the signature */
		assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH;
	} else if (len >= MONO_PUBLIC_KEY_HEADER_LENGTH + MONO_MINIMUM_PUBLIC_KEY_LENGTH) {
		/* minimum key size (in 2.0) is 384 bits */
		assembly->strong_name_size = len - MONO_PUBLIC_KEY_HEADER_LENGTH;
	} else {
		/* FIXME - verifier */
		g_warning ("Invalid public key length: %d bits (total: %d)", (int)MONO_PUBLIC_KEY_BIT_SIZE (len), (int)len);
		assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH; /* to be safe */
	}
	assembly->strong_name = g_malloc0 (assembly->strong_name_size);

	return token;
}

static void
mono_image_emit_manifest (MonoReflectionModuleBuilder *moduleb)
{
	/* Emits the ASSEMBLY, FILE, EXPORTEDTYPE and MANIFESTRESOURCE tables
	 * for the main module of a dynamic assembly. */
	MonoDynamicTable *table;
	MonoDynamicImage *assembly;
	MonoReflectionAssemblyBuilder *assemblyb;
	MonoDomain *domain;
	guint32 *values;
	int i;
	guint32 module_index;

	assemblyb = moduleb->assemblyb;
	assembly = moduleb->dynamic_image;
	domain = mono_object_domain (assemblyb);

	/* Emit ASSEMBLY table */
	table = &assembly->tables [MONO_TABLE_ASSEMBLY];
	alloc_table (table, 1);
	values = table->values + MONO_ASSEMBLY_SIZE;
	values [MONO_ASSEMBLY_HASH_ALG] = assemblyb->algid? assemblyb->algid: ASSEMBLY_HASH_SHA1;
	values [MONO_ASSEMBLY_NAME] = string_heap_insert_mstring (&assembly->sheap, assemblyb->name);
	if (assemblyb->culture) {
		values [MONO_ASSEMBLY_CULTURE] = string_heap_insert_mstring (&assembly->sheap, assemblyb->culture);
	} else {
		values [MONO_ASSEMBLY_CULTURE] = string_heap_insert (&assembly->sheap, "");
	}
	values [MONO_ASSEMBLY_PUBLIC_KEY] = load_public_key (assemblyb->public_key, assembly);
	values [MONO_ASSEMBLY_FLAGS] = assemblyb->flags;
	set_version_from_string (assemblyb->version, values);

	/* Emit FILE + EXPORTED_TYPE table */
	/* every module other than this one gets a FILE row plus EXPORTEDTYPE
	 * rows for its public types */
	module_index = 0;
	for (i = 0; i < mono_array_length (assemblyb->modules); ++i) {
		int j;
		MonoReflectionModuleBuilder *file_module = 
			mono_array_get (assemblyb->modules, MonoReflectionModuleBuilder*, i);
		if (file_module != moduleb) {
			mono_image_fill_file_table (domain, (MonoReflectionModule*)file_module, assembly);
			module_index ++;
			if (file_module->types) {
				for (j = 0; j < file_module->num_types; ++j) {
					MonoReflectionTypeBuilder *tb = mono_array_get (file_module->types, MonoReflectionTypeBuilder*, j);
					mono_image_fill_export_table (domain, tb, module_index, 0, assembly);
				}
			}
		}
	}
	if (assemblyb->loaded_modules) {
		for (i = 0; i < mono_array_length (assemblyb->loaded_modules); ++i) {
			MonoReflectionModule *file_module = 
				mono_array_get (assemblyb->loaded_modules, MonoReflectionModule*, i);
			mono_image_fill_file_table (domain, file_module, assembly);
			module_index ++;
			mono_image_fill_export_table_from_module (domain, file_module, module_index, assembly);
		}
	}
	if (assemblyb->type_forwarders)
		mono_image_fill_export_table_from_type_forwarders (assemblyb, assembly);

	/* Emit MANIFESTRESOURCE table */
	module_index = 0;
	for (i = 0; i < mono_array_length (assemblyb->modules); ++i) {
		int j;
		MonoReflectionModuleBuilder *file_module = 
			mono_array_get (assemblyb->modules, MonoReflectionModuleBuilder*, i);
		/* The table for the main module is emitted later */
		if (file_module != moduleb) {
			module_index ++;
			if (file_module->resources) {
				int len = mono_array_length (file_module->resources);
				for (j = 0; j < len; ++j) {
					MonoReflectionResource* res = (MonoReflectionResource*)mono_array_addr (file_module->resources, MonoReflectionResource, j);
					assembly_add_resource_manifest (file_module, assembly, res, MONO_IMPLEMENTATION_FILE | (module_index << MONO_IMPLEMENTATION_BITS));
				}
			}
		}
	}		
}

#ifndef DISABLE_REFLECTION_EMIT_SAVE

/*
 * mono_image_build_metadata() will fill the info in all the needed metadata tables
 * for the modulebuilder @moduleb.
 * At the end of the process, method and field tokens are fixed up and the 
 * on-disk compressed metadata representation is created.
 */
void
mono_image_build_metadata (MonoReflectionModuleBuilder *moduleb)
{
	MonoDynamicTable *table;
	MonoDynamicImage *assembly;
	MonoReflectionAssemblyBuilder *assemblyb;
	MonoDomain *domain;
	MonoPtrArray types;
	guint32 *values;
	int i, j;

	assemblyb = moduleb->assemblyb;
	assembly = moduleb->dynamic_image;
	domain = mono_object_domain (assemblyb);

	/* a non-zero text_rva means this module was already built */
	if (assembly->text_rva)
		return;

	assembly->text_rva = START_TEXT_RVA;

	if (moduleb->is_main) {
		mono_image_emit_manifest (moduleb);
	}

	table = &assembly->tables [MONO_TABLE_TYPEDEF];
	table->rows = 1; /* .<Module> */
	table->next_idx++;
	alloc_table (table, table->rows);
	/*
	 * Set the first entry.
	 */
	values = table->values + table->columns;
	values [MONO_TYPEDEF_FLAGS] = 0;
	values [MONO_TYPEDEF_NAME] = string_heap_insert (&assembly->sheap, "<Module>") ;
	values [MONO_TYPEDEF_NAMESPACE] = string_heap_insert (&assembly->sheap, "") ;
	values [MONO_TYPEDEF_EXTENDS] = 0;
	values [MONO_TYPEDEF_FIELD_LIST] = 1;
	values [MONO_TYPEDEF_METHOD_LIST] = 1;

	/* 
	 * handle global methods 
	 * FIXME: test what to do when global methods are defined in multiple modules.
	 */
	if (moduleb->global_methods) {
		table = &assembly->tables [MONO_TABLE_METHOD];
		table->rows += mono_array_length (moduleb->global_methods);
		alloc_table (table, table->rows);
		for (i = 0; i < mono_array_length (moduleb->global_methods); ++i)
			mono_image_get_method_info (
				mono_array_get (moduleb->global_methods, MonoReflectionMethodBuilder*, i), assembly);
	}
	if (moduleb->global_fields) {
		table = &assembly->tables [MONO_TABLE_FIELD];
		table->rows += mono_array_length (moduleb->global_fields);
		alloc_table (table, table->rows);
		for (i = 0; i < mono_array_length (moduleb->global_fields); ++i)
			mono_image_get_field_info (
				mono_array_get (moduleb->global_fields, MonoReflectionFieldBuilder*, i), assembly);
	}

	table = &assembly->tables [MONO_TABLE_MODULE];
	alloc_table (table, 1);
	mono_image_fill_module_table (domain, moduleb, assembly);

	/* Collect all types into a list sorted by their table_idx */
	mono_ptr_array_init (types, moduleb->num_types);

	if (moduleb->types)
		for (i = 0; i < moduleb->num_types; ++i) {
			MonoReflectionTypeBuilder *type = mono_array_get (moduleb->types, MonoReflectionTypeBuilder*, i);
			collect_types (&types, type);
		}

	mono_ptr_array_sort (types, (gpointer)compare_types_by_table_idx);
	table = &assembly->tables [MONO_TABLE_TYPEDEF];
	table->rows += mono_ptr_array_size (types);
	alloc_table (table, table->rows);

	/*
	 * Emit type names + namespaces at one place inside the string heap,
	 * so load_class_names () needs to touch fewer pages.
	 */
	for (i = 0; i < mono_ptr_array_size (types); ++i) {
		MonoReflectionTypeBuilder *tb = mono_ptr_array_get (types, i);
		string_heap_insert_mstring (&assembly->sheap, tb->nspace);
	}
	for (i = 0; i < mono_ptr_array_size (types); ++i) {
		MonoReflectionTypeBuilder *tb = mono_ptr_array_get (types, i);
		string_heap_insert_mstring (&assembly->sheap, tb->name);
	}

	for (i = 0; i < mono_ptr_array_size (types); ++i) {
		MonoReflectionTypeBuilder *type = mono_ptr_array_get (types, i);
		mono_image_get_type_info (domain, type, assembly);
	}

	/* 
	 * table->rows is already set above and in mono_image_fill_module_table.
	 */
	/* add all the custom attributes at the end, once all the indexes are stable */
	mono_image_add_cattrs (assembly, 1, MONO_CUSTOM_ATTR_ASSEMBLY, assemblyb->cattrs);

	/* CAS assembly permissions */
	if (assemblyb->permissions_minimum)
		mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_minimum);
	if (assemblyb->permissions_optional)
		mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_optional);
	if (assemblyb->permissions_refused)
		mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_refused);

	module_add_cattrs (assembly, moduleb);

	/* fixup tokens */
	mono_g_hash_table_foreach (assembly->token_fixups, (GHFunc)fixup_method, assembly);

	/* Create the MethodImpl table.  We do this after emitting all methods so we already know
	 * the final tokens and don't need another fixup pass. */
	if (moduleb->global_methods) {
		for (i = 0; i < mono_array_length (moduleb->global_methods); ++i) {
			MonoReflectionMethodBuilder *mb = mono_array_get (
				moduleb->global_methods, MonoReflectionMethodBuilder*, i);
			mono_image_add_methodimpl (assembly, mb);
		}
	}

	for (i = 0; i < mono_ptr_array_size (types); ++i) {
		MonoReflectionTypeBuilder *type = mono_ptr_array_get (types, i);
		if (type->methods) {
			for (j = 0; j < type->num_methods; ++j) {
				MonoReflectionMethodBuilder *mb = mono_array_get (
					type->methods, MonoReflectionMethodBuilder*, j);
				mono_image_add_methodimpl (assembly, mb);
			}
		}
	}

	mono_ptr_array_destroy (types);

	fixup_cattrs (assembly);
}

#else /* DISABLE_REFLECTION_EMIT_SAVE */

void
mono_image_build_metadata (MonoReflectionModuleBuilder *moduleb)
{
	g_error ("This mono runtime was configured with --enable-minimal=reflection_emit_save, so saving of dynamic assemblies is not supported.");
}

#endif /* DISABLE_REFLECTION_EMIT_SAVE */

/* PE import directory table entry layout */
typedef struct {
	guint32 import_lookup_table;
	guint32 timestamp;
	guint32 forwarder;
	guint32 name_rva;
	guint32 import_address_table_rva;
} MonoIDT;

/* PE import lookup table entry layout */
typedef struct {
	guint32 name_rva;
	guint32 flags;
} MonoILT;

#ifndef DISABLE_REFLECTION_EMIT

/*
 * mono_image_insert_string:
 * @module: module builder object
 * @str: a string
 *
 * Insert @str into the user string stream of @module.
 */
guint32
mono_image_insert_string (MonoReflectionModuleBuilder *module, MonoString *str)
{
	MonoDynamicImage *assembly;
	guint32 idx;
	char buf [16];
	char *b = buf;
	
	MONO_ARCH_SAVE_REGS;

	if (!module->dynamic_image)
		mono_image_module_basic_init (module);

	assembly = module->dynamic_image;
	
	if (assembly->save) {
		/* #US entries are length-prefixed UTF-16; the low bit of the
		 * length marks the presence of non-trivial characters */
		mono_metadata_encode_value (1 | (str->length * 2), b, &b);
		idx = mono_image_add_stream_data (&assembly->us, buf, b-buf);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
	{
		char *swapped = g_malloc (2 * mono_string_length (str));
		const char *p = (const char*)mono_string_chars (str);

		swap_with_size (swapped, p, 2, mono_string_length (str));
		mono_image_add_stream_data (&assembly->us, swapped, str->length * 2);
		g_free (swapped);
	}
#else
		mono_image_add_stream_data (&assembly->us, (const char*)mono_string_chars (str), str->length * 2);
#endif
		mono_image_add_stream_data (&assembly->us, "", 1);
	} else {
		idx = assembly->us.index ++;
	}

	mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (MONO_TOKEN_STRING | idx), str);

	return MONO_TOKEN_STRING | idx;
}

/*
 * Create a varargs MEMBERREF token for @obj (a MonoMethod or MethodBuilder),
 * appending @opt_param_types after the sentinel in the call-site signature.
 */
guint32
mono_image_create_method_token (MonoDynamicImage *assembly, MonoObject *obj, MonoArray *opt_param_types)
{
	MonoClass *klass;
	guint32 token = 0;
	MonoMethodSignature *sig;

	klass = obj->vtable->klass;
	if (strcmp (klass->name, "MonoMethod") == 0) {
		MonoMethod *method = ((MonoReflectionMethod *)obj)->method;
		MonoMethodSignature *old;
		guint32 sig_token, parent;
		int nargs, i;

		g_assert (opt_param_types && (mono_method_signature (method)->sentinelpos >= 0));

		nargs = mono_array_length (opt_param_types);
		old = mono_method_signature (method);
		/* clone the fixed part of the signature and append the optional
		 * (post-sentinel) parameter types */
		sig = mono_metadata_signature_alloc ( &assembly->image, old->param_count + nargs);

		sig->hasthis = old->hasthis;
		sig->explicit_this = old->explicit_this;
		sig->call_convention = old->call_convention;
		sig->generic_param_count = old->generic_param_count;
		sig->param_count = old->param_count + nargs;
		sig->sentinelpos = old->param_count;
		sig->ret = old->ret;

		for (i = 0; i < old->param_count; i++)
			sig->params [i] = old->params [i];

		for (i = 0; i < nargs; i++) {
			MonoReflectionType *rt = mono_array_get (opt_param_types, MonoReflectionType *, i);
			sig->params [old->param_count + i] = mono_reflection_type_get_handle (rt);
		}

		parent = mono_image_typedef_or_ref (assembly, &method->klass->byval_arg);
		g_assert ((parent & MONO_TYPEDEFORREF_MASK) == MONO_MEMBERREF_PARENT_TYPEREF);
		parent >>= MONO_TYPEDEFORREF_BITS;

		parent <<= MONO_MEMBERREF_PARENT_BITS;
		parent |= MONO_MEMBERREF_PARENT_TYPEREF;

		sig_token = method_encode_signature (assembly, sig);
		token = mono_image_get_varargs_method_token (assembly, parent, method->name, sig_token);
	} else if (strcmp (klass->name, "MethodBuilder") == 0) {
		MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;
		ReflectionMethodBuilder rmb;
		guint32 parent, sig_token;
		int nopt_args, nparams, ngparams, i;
		char *name;

		reflection_methodbuilder_from_method_builder (&rmb, mb);
		rmb.opt_types = opt_param_types;
		nopt_args = mono_array_length (opt_param_types);

		nparams = rmb.parameters ? mono_array_length (rmb.parameters): 0;
		ngparams = rmb.generic_params ? mono_array_length (rmb.generic_params): 0;
		sig = mono_metadata_signature_alloc (&assembly->image, nparams + nopt_args);

		sig->hasthis = !(rmb.attrs & METHOD_ATTRIBUTE_STATIC);
		sig->explicit_this = (rmb.call_conv & 0x40) == 0x40;
		sig->call_convention = rmb.call_conv;
		sig->generic_param_count = ngparams;
		sig->param_count = nparams + nopt_args;
		sig->sentinelpos = nparams;
		sig->ret = mono_reflection_type_get_handle (rmb.rtype);

		for (i = 0; i < nparams; i++) {
			MonoReflectionType *rt = mono_array_get (rmb.parameters, MonoReflectionType *, i);
			sig->params [i] = mono_reflection_type_get_handle (rt);
		}

		for (i = 0; i < nopt_args; i++) {
			MonoReflectionType *rt = mono_array_get (opt_param_types, MonoReflectionType *, i);
			sig->params [nparams + i] = mono_reflection_type_get_handle (rt);
		}

		sig_token = method_builder_encode_signature (assembly, &rmb);

		parent = mono_image_create_token (assembly, obj, TRUE, TRUE);
		g_assert (mono_metadata_token_table (parent) == MONO_TABLE_METHOD);

		parent = mono_metadata_token_index (parent) << MONO_MEMBERREF_PARENT_BITS;
		parent |= MONO_MEMBERREF_PARENT_METHODDEF;

		name = mono_string_to_utf8 (rmb.name);
		token = mono_image_get_varargs_method_token (
			assembly, parent, name, sig_token);
		g_free (name);
	} else {
		g_error ("requested method token for %s\n", klass->name);
	}

	/* the signature is needed later to emit the call site */
	g_hash_table_insert (assembly->vararg_aux_hash, GUINT_TO_POINTER (token), sig);
	mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), obj);
	return token;
}

/*
 * mono_image_create_token:
 * @assembly: a dynamic assembly
 * @obj:
 * @register_token: Whenever to register the token in the assembly->tokens hash.
 *
 * Get a token to insert in the IL code stream for the given MemberInfo.
 * The metadata emission routines need to pass FALSE as REGISTER_TOKEN, since by that time, 
 * the table_idx-es were recomputed, so registering the token would overwrite an existing 
 * entry.
*/
guint32
mono_image_create_token (MonoDynamicImage *assembly, MonoObject *obj, 
			 gboolean create_open_instance, gboolean register_token)
{
	MonoClass *klass;
	guint32 token = 0;

	klass = obj->vtable->klass;

	/* Check for user defined reflection objects */
	/* TypeDelegator is the only corlib type which doesn't look like a MonoReflectionType */
	/* NOTE(review): dispatch below is by class NAME, so any non-corlib reflection
	 * object (user-defined System.Type subclass) is rejected up front. */
	if (klass->image != mono_defaults.corlib || (strcmp (klass->name, "TypeDelegator") == 0))
		mono_raise_exception (mono_get_exception_not_supported ("User defined subclasses of System.Type are not yet supported")); \

	/* Dispatch on the concrete reflection wrapper type to pick the right
	 * metadata table/token encoding. */
	if (strcmp (klass->name, "MethodBuilder") == 0) {
		MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;
		MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;

		/* A builder defined in THIS image (and not generic) already has a
		 * definitive METHOD_DEF row; otherwise a memberref/spec is emitted. */
		if (tb->module->dynamic_image == assembly && !tb->generic_params && !mb->generic_params)
			token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
		else
			token = mono_image_get_methodbuilder_token (assembly, mb, create_open_instance);
		/*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/
	} else if (strcmp (klass->name, "ConstructorBuilder") == 0) {
		MonoReflectionCtorBuilder *mb = (MonoReflectionCtorBuilder *)obj;
		MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;

		if (tb->module->dynamic_image == assembly && !tb->generic_params)
			token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
		else
			token = mono_image_get_ctorbuilder_token (assembly, mb);
		/*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/
	} else if (strcmp (klass->name, "FieldBuilder") == 0) {
		MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)obj;
		MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)fb->typeb;
		if (tb->generic_params) {
			token = mono_image_get_generic_field_token (assembly, fb);
		} else {
			if ((tb->module->dynamic_image == assembly)) {
				token = fb->table_idx | MONO_TOKEN_FIELD_DEF;
			} else {
				token = mono_image_get_fieldref_token (assembly, (MonoObject*)fb, fb->handle);
			}
		}
	} else if (strcmp (klass->name, "TypeBuilder") == 0) {
		MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)obj;
		if (create_open_instance && tb->generic_params) {
			MonoType *type;
			init_type_builder_generics (obj);
			type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
			token = mono_image_typedef_or_ref_full (assembly, type, TRUE);
			token = mono_metadata_token_from_dor (token);
		} else {
			token = tb->table_idx | MONO_TOKEN_TYPE_DEF;
		}
	} else if (strcmp (klass->name, "MonoType") == 0) {
		MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
		MonoClass *mc = mono_class_from_mono_type (type);
		if (!mono_class_init (mc))
			mono_raise_exception (mono_class_get_exception_for_failure (mc));

		token = mono_metadata_token_from_dor (
			mono_image_typedef_or_ref_full (assembly, type, mc->generic_container == NULL || create_open_instance));
	} else if (strcmp (klass->name, "GenericTypeParameterBuilder") == 0) {
		MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
		token = mono_metadata_token_from_dor (
			mono_image_typedef_or_ref (assembly, type));
	} else if (strcmp (klass->name, "MonoGenericClass") == 0) {
		MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
		token = mono_metadata_token_from_dor (
			mono_image_typedef_or_ref (assembly, type));
	} else if (strcmp (klass->name, "MonoCMethod") == 0 ||
		   strcmp (klass->name, "MonoMethod") == 0 ||
		   strcmp (klass->name, "MonoGenericMethod") == 0 ||
		   strcmp (klass->name, "MonoGenericCMethod") == 0) {
		MonoReflectionMethod *m = (MonoReflectionMethod *)obj;
		if (m->method->is_inflated) {
			if (create_open_instance)
				token = mono_image_get_methodspec_token (assembly, m->method);
			else
				token = mono_image_get_inflated_method_token (assembly, m->method);
		} else if ((m->method->klass->image == &assembly->image) &&
			 !m->method->klass->generic_class) {
			static guint32 method_table_idx = 0xffffff;
			if (m->method->klass->wastypebuilder) {
				/* we use the same token as the one that was assigned
				 * to the Methodbuilder.
				 * FIXME: do the equivalent for Fields.
				 */
				token = m->method->token;
			} else {
				/*
				 * Each token should have a unique index, but the indexes are
				 * assigned by managed code, so we don't know about them. An
				 * easy solution is to count backwards...
				 */
				method_table_idx --;
				token = MONO_TOKEN_METHOD_DEF | method_table_idx;
			}
		} else {
			token = mono_image_get_methodref_token (assembly, m->method, create_open_instance);
		}
		/*g_print ("got token 0x%08x for %s\n", token, m->method->name);*/
	} else if (strcmp (klass->name, "MonoField") == 0) {
		MonoReflectionField *f = (MonoReflectionField *)obj;
		if ((f->field->parent->image == &assembly->image) && !is_field_on_inst (f->field)) {
			/* Same backwards-counting trick as for methods above. */
			static guint32 field_table_idx = 0xffffff;
			field_table_idx --;
			token = MONO_TOKEN_FIELD_DEF | field_table_idx;
		} else {
			token = mono_image_get_fieldref_token (assembly, (MonoObject*)f, f->field);
		}
		/*g_print ("got token 0x%08x for %s\n", token, f->field->name);*/
	} else if (strcmp (klass->name, "MonoArrayMethod") == 0) {
		MonoReflectionArrayMethod *m = (MonoReflectionArrayMethod *)obj;
		token = mono_image_get_array_token (assembly, m);
	} else if (strcmp (klass->name, "SignatureHelper") == 0) {
		MonoReflectionSigHelper *s = (MonoReflectionSigHelper*)obj;
		token = MONO_TOKEN_SIGNATURE | mono_image_get_sighelper_token (assembly, s);
	} else if (strcmp (klass->name, "EnumBuilder") == 0) {
		MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
		token = mono_metadata_token_from_dor (
			mono_image_typedef_or_ref (assembly, type));
	} else if (strcmp (klass->name, "FieldOnTypeBuilderInst") == 0) {
		MonoReflectionFieldOnTypeBuilderInst *f = (MonoReflectionFieldOnTypeBuilderInst*)obj;
		token = mono_image_get_field_on_inst_token (assembly, f);
	} else if (strcmp (klass->name, "ConstructorOnTypeBuilderInst") == 0) {
		MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)obj;
		token = mono_image_get_ctor_on_inst_token (assembly, c, create_open_instance);
	} else if (strcmp (klass->name, "MethodOnTypeBuilderInst") == 0) {
		MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)obj;
		token = mono_image_get_method_on_inst_token (assembly, m, create_open_instance);
	} else if (is_sre_array (klass) || is_sre_byref (klass) || is_sre_pointer (klass)) {
		MonoReflectionType *type = (MonoReflectionType *)obj;
		token = mono_metadata_token_from_dor (
				mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (type)));
	} else {
		g_error ("requested token for %s\n", klass->name);
	}

	if (register_token)
		mono_image_register_token (assembly, token, obj);

	return token;
}

/*
 * mono_image_register_token:
 *
 * Register the TOKEN->OBJ mapping in the mapping table in ASSEMBLY. This is required for
 * the Module.ResolveXXXToken () methods to work.
 */
void
mono_image_register_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj)
{
	MonoObject *prev = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
	if (prev) {
		/* There could be multiple MethodInfo objects with the same token */
		//g_assert (prev == obj);
	} else {
		mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), obj);
	}
}

/*
 * create_dynamic_mono_image:
 *
 * Allocate and initialize the MonoDynamicImage backing a Reflection.Emit
 * assembly: hash tables for token bookkeeping, metadata streams, and the
 * PE import-table scaffolding in the code stream.
 */
static MonoDynamicImage*
create_dynamic_mono_image (MonoDynamicAssembly *assembly, char *assembly_name, char *module_name)
{
	static const guchar entrycode [16] = {0xff, 0x25, 0};
	MonoDynamicImage *image;
	int i;
	const char *version;

	if (!strcmp (mono_get_runtime_info ()->framework_version, "2.1"))
		version = "v2.0.50727"; /* HACK: SL 2 enforces the .net 2 metadata version */
	else
		version = mono_get_runtime_info ()->runtime_version;

#if HAVE_BOEHM_GC
	/* The MonoGHashTable's need GC tracking */
	image = GC_MALLOC (sizeof (MonoDynamicImage));
#else
	image = g_new0 (MonoDynamicImage, 1);
#endif

	mono_profiler_module_event (&image->image, MONO_PROFILE_START_LOAD);

	/*g_print ("created image %p\n", image);*/
	/* keep in sync with image.c */
	image->image.name = assembly_name;
	image->image.assembly_name = image->image.name;
/* they may be different */
	image->image.module_name = module_name;
	image->image.version = g_strdup (version);
	image->image.md_version_major = 1;
	image->image.md_version_minor = 1;
	image->image.dynamic = TRUE;
	image->image.references = g_new0 (MonoAssembly*, 1);
	image->image.references [0] = NULL;

	mono_image_init (&image->image);

	/* MonoGHashTables hold managed pointers and register GC roots;
	 * plain GHashTables are used where both key and value are unmanaged. */
	image->token_fixups = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC);
	image->method_to_table_idx = g_hash_table_new (NULL, NULL);
	image->field_to_table_idx = g_hash_table_new (NULL, NULL);
	image->method_aux_hash = g_hash_table_new (NULL, NULL);
	image->vararg_aux_hash = g_hash_table_new (NULL, NULL);
	image->handleref = g_hash_table_new (NULL, NULL);
	image->handleref_managed = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC);
	image->tokens = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC);
	image->generic_def_objects = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC);
	image->methodspec = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC);
	image->typespec = g_hash_table_new ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal);
	image->typeref = g_hash_table_new ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal);
	image->blob_cache = g_hash_table_new ((GHashFunc)mono_blob_entry_hash, (GCompareFunc)mono_blob_entry_equal);
	image->gen_params = g_ptr_array_new ();

	/*g_print ("string heap create for image %p (%s)\n", image, module_name);*/
	string_heap_init (&image->sheap);
	mono_image_add_stream_data (&image->us, "", 1);
	add_to_blob_cached (image, (char*) "", 1, NULL, 0);
	/* import tables... */
	mono_image_add_stream_data (&image->code, (char*)entrycode, sizeof (entrycode));
	image->iat_offset = mono_image_add_stream_zero (&image->code, 8); /* two IAT entries */
	image->idt_offset = mono_image_add_stream_zero (&image->code, 2 * sizeof (MonoIDT)); /* two IDT entries */
	image->imp_names_offset = mono_image_add_stream_zero (&image->code, 2); /* flags for name entry */
	/* 12 = strlen ("_CorExeMain") + 1: the terminating NUL is emitted too */
	mono_image_add_stream_data (&image->code, "_CorExeMain", 12);
	mono_image_add_stream_data (&image->code, "mscoree.dll", 12);
	image->ilt_offset = mono_image_add_stream_zero (&image->code, 8); /* two ILT entries */
	stream_data_align (&image->code);

	image->cli_header_offset = mono_image_add_stream_zero (&image->code, sizeof (MonoCLIHeader));

	for (i=0; i < MONO_TABLE_NUM; ++i) {
		image->tables [i].next_idx = 1;
		image->tables [i].columns = table_sizes [i];
	}

	image->image.assembly = (MonoAssembly*)assembly;
	image->run = assembly->run;
	image->save = assembly->save;
	image->pe_kind = 0x1; /* ILOnly */
	image->machine = 0x14c; /* I386 */

	mono_profiler_module_loaded (&image->image, MONO_PROFILE_OK);

	return image;
}
#endif

/* GHFunc helper: frees the malloc'ed blob-cache keys during teardown. */
static void
free_blob_cache_entry (gpointer key, gpointer val, gpointer user_data)
{
	g_free (key);
}

/*
 * mono_dynamic_image_free:
 *
 * Release all side tables, streams and cached allocations owned by a
 * MonoDynamicImage. Mirrors the setup done in create_dynamic_mono_image;
 * note the image struct itself is NOT freed here.
 */
void
mono_dynamic_image_free (MonoDynamicImage *image)
{
	MonoDynamicImage *di = image;
	GList *list;
	int i;

	if (di->methodspec)
		mono_g_hash_table_destroy (di->methodspec);
	if (di->typespec)
		g_hash_table_destroy (di->typespec);
	if (di->typeref)
		g_hash_table_destroy (di->typeref);
	if (di->handleref)
		g_hash_table_destroy (di->handleref);
	if (di->handleref_managed)
		mono_g_hash_table_destroy (di->handleref_managed);
	if (di->tokens)
		mono_g_hash_table_destroy (di->tokens);
	if (di->generic_def_objects)
		mono_g_hash_table_destroy (di->generic_def_objects);
	if (di->blob_cache) {
		/* keys were strdup'ed into the cache: free them before the table */
		g_hash_table_foreach (di->blob_cache, free_blob_cache_entry, NULL);
		g_hash_table_destroy (di->blob_cache);
	}
	if (di->standalonesig_cache)
		g_hash_table_destroy (di->standalonesig_cache);
	for (list = di->array_methods; list; list = list->next) {
		ArrayMethod *am = (ArrayMethod *)list->data;
		g_free (am->sig);
		g_free (am->name);
		g_free (am);
	}
	g_list_free (di->array_methods);
	if (di->gen_params) {
		for (i = 0; i < di->gen_params->len; i++) {
			GenericParamTableEntry *entry = g_ptr_array_index (di->gen_params, i);
			if (entry->gparam->type.type) {
				MonoGenericParam *param = entry->gparam->type.type->data.generic_param;
				g_free ((char*)mono_generic_param_info (param)->name);
				g_free (param);
			}
			/* entry->gparam was registered as a GC root when added */
			mono_gc_deregister_root ((char*) &entry->gparam);
			g_free (entry);
		}
	 	g_ptr_array_free (di->gen_params, TRUE);
	}
	if (di->token_fixups)
		mono_g_hash_table_destroy (di->token_fixups);
	if (di->method_to_table_idx)
		g_hash_table_destroy (di->method_to_table_idx);
	if (di->field_to_table_idx)
		g_hash_table_destroy (di->field_to_table_idx);
	if (di->method_aux_hash)
		g_hash_table_destroy (di->method_aux_hash);
	if (di->vararg_aux_hash)
		g_hash_table_destroy (di->vararg_aux_hash);
	g_free (di->strong_name);
	g_free (di->win32_res);
	if (di->public_key)
		g_free (di->public_key);

	/*g_print ("string heap destroy for image %p\n", di);*/
	mono_dynamic_stream_reset (&di->sheap);
	mono_dynamic_stream_reset (&di->code);
	mono_dynamic_stream_reset (&di->resources);
	mono_dynamic_stream_reset (&di->us);
	mono_dynamic_stream_reset (&di->blob);
	mono_dynamic_stream_reset (&di->tstream);
	mono_dynamic_stream_reset (&di->guid);
	for (i = 0; i < MONO_TABLE_NUM; ++i) {
		g_free (di->tables [i].values);
	}
}

#ifndef DISABLE_REFLECTION_EMIT

/*
 * mono_image_basic_init:
 * @assembly: an assembly builder object
 *
 * Create the MonoImage that represents the assembly builder and setup some
 * of the helper hash table and the basic metadata streams.
*/ void mono_image_basic_init (MonoReflectionAssemblyBuilder *assemblyb) { MonoDynamicAssembly *assembly; MonoDynamicImage *image; MonoDomain *domain = mono_object_domain (assemblyb); MONO_ARCH_SAVE_REGS; if (assemblyb->dynamic_assembly) return; #if HAVE_BOEHM_GC /* assembly->assembly.image might be GC allocated */ assembly = assemblyb->dynamic_assembly = GC_MALLOC (sizeof (MonoDynamicAssembly)); #else assembly = assemblyb->dynamic_assembly = g_new0 (MonoDynamicAssembly, 1); #endif mono_profiler_assembly_event (&assembly->assembly, MONO_PROFILE_START_LOAD); assembly->assembly.ref_count = 1; assembly->assembly.dynamic = TRUE; assembly->assembly.corlib_internal = assemblyb->corlib_internal; assemblyb->assembly.assembly = (MonoAssembly*)assembly; assembly->assembly.basedir = mono_string_to_utf8 (assemblyb->dir); if (assemblyb->culture) assembly->assembly.aname.culture = mono_string_to_utf8 (assemblyb->culture); else assembly->assembly.aname.culture = g_strdup (""); if (assemblyb->version) { char *vstr = mono_string_to_utf8 (assemblyb->version); char **version = g_strsplit (vstr, ".", 4); char **parts = version; assembly->assembly.aname.major = atoi (*parts++); assembly->assembly.aname.minor = atoi (*parts++); assembly->assembly.aname.build = *parts != NULL ? atoi (*parts++) : 0; assembly->assembly.aname.revision = *parts != NULL ? 
atoi (*parts) : 0; g_strfreev (version); g_free (vstr); } else { assembly->assembly.aname.major = 0; assembly->assembly.aname.minor = 0; assembly->assembly.aname.build = 0; assembly->assembly.aname.revision = 0; } assembly->run = assemblyb->access != 2; assembly->save = assemblyb->access != 1; assembly->domain = domain; image = create_dynamic_mono_image (assembly, mono_string_to_utf8 (assemblyb->name), g_strdup ("RefEmit_YouForgotToDefineAModule")); image->initial_image = TRUE; assembly->assembly.aname.name = image->image.name; assembly->assembly.image = &image->image; if (assemblyb->pktoken && assemblyb->pktoken->max_length) { /* -1 to correct for the trailing NULL byte */ if (assemblyb->pktoken->max_length != MONO_PUBLIC_KEY_TOKEN_LENGTH - 1) { g_error ("Public key token length invalid for assembly %s: %i", assembly->assembly.aname.name, assemblyb->pktoken->max_length); } memcpy (&assembly->assembly.aname.public_key_token, mono_array_addr (assemblyb->pktoken, guint8, 0), assemblyb->pktoken->max_length); } mono_domain_assemblies_lock (domain); domain->domain_assemblies = g_slist_prepend (domain->domain_assemblies, assembly); mono_domain_assemblies_unlock (domain); register_assembly (mono_object_domain (assemblyb), &assemblyb->assembly, &assembly->assembly); mono_profiler_assembly_loaded (&assembly->assembly, MONO_PROFILE_OK); mono_assembly_invoke_load_hook ((MonoAssembly*)assembly); } #endif /* !DISABLE_REFLECTION_EMIT */ #ifndef DISABLE_REFLECTION_EMIT_SAVE static int calc_section_size (MonoDynamicImage *assembly) { int nsections = 0; /* alignment constraints */ mono_image_add_stream_zero (&assembly->code, 4 - (assembly->code.index % 4)); g_assert ((assembly->code.index % 4) == 0); assembly->meta_size += 3; assembly->meta_size &= ~3; mono_image_add_stream_zero (&assembly->resources, 4 - (assembly->resources.index % 4)); g_assert ((assembly->resources.index % 4) == 0); assembly->sections [MONO_SECTION_TEXT].size = assembly->meta_size + assembly->code.index + 
assembly->resources.index + assembly->strong_name_size;
	assembly->sections [MONO_SECTION_TEXT].attrs = SECT_FLAGS_HAS_CODE | SECT_FLAGS_MEM_EXECUTE | SECT_FLAGS_MEM_READ;
	nsections++;

	/* .rsrc is only emitted when Win32 resources were actually attached */
	if (assembly->win32_res) {
		guint32 res_size = (assembly->win32_res_size + 3) & ~3;
		assembly->sections [MONO_SECTION_RSRC].size = res_size;
		assembly->sections [MONO_SECTION_RSRC].attrs = SECT_FLAGS_HAS_INITIALIZED_DATA | SECT_FLAGS_MEM_READ;
		nsections++;
	}

	/* .reloc always holds exactly one 12-byte base-relocation block */
	assembly->sections [MONO_SECTION_RELOC].size = 12;
	assembly->sections [MONO_SECTION_RELOC].attrs = SECT_FLAGS_MEM_READ | SECT_FLAGS_MEM_DISCARDABLE | SECT_FLAGS_HAS_INITIALIZED_DATA;
	nsections++;

	return nsections;
}

/* One node of the three-level Win32 resource tree (type -> id -> language). */
typedef struct {
	guint32 id;
	guint32 offset;
	GSList *children;
	MonoReflectionWin32Resource *win32_res; /* Only for leaf nodes */
} ResTreeNode;

/* GCompareFunc: orders sibling nodes by ascending resource id. */
static int
resource_tree_compare_by_id (gconstpointer a, gconstpointer b)
{
	ResTreeNode *t1 = (ResTreeNode*)a;
	ResTreeNode *t2 = (ResTreeNode*)b;

	return t1->id - t2->id;
}

/*
 * resource_tree_create:
 *
 *  Organize the resources into a resource tree.
 */
static ResTreeNode *
resource_tree_create (MonoArray *win32_resources)
{
	ResTreeNode *tree, *res_node, *type_node, *lang_node;
	GSList *l;
	int i;

	tree = g_new0 (ResTreeNode, 1);

	for (i = 0; i < mono_array_length (win32_resources); ++i) {
		MonoReflectionWin32Resource *win32_res =
			(MonoReflectionWin32Resource*)mono_array_addr (win32_resources, MonoReflectionWin32Resource, i);

		/* Create node */

		/* FIXME: BUG: this stores managed references in unmanaged memory */
		lang_node = g_new0 (ResTreeNode, 1);
		lang_node->id = win32_res->lang_id;
		lang_node->win32_res = win32_res;

		/* Create type node if neccesary */
		type_node = NULL;
		for (l = tree->children; l; l = l->next)
			if (((ResTreeNode*)(l->data))->id == win32_res->res_type) {
				type_node = (ResTreeNode*)l->data;
				break;
			}

		if (!type_node) {
			type_node = g_new0 (ResTreeNode, 1);
			type_node->id = win32_res->res_type;

			/*
			 * The resource types have to be sorted otherwise
			 * Windows Explorer can't display the version information.
			 */
			tree->children = g_slist_insert_sorted (tree->children, type_node, resource_tree_compare_by_id);
		}

		/* Create res node if neccesary */
		res_node = NULL;
		for (l = type_node->children; l; l = l->next)
			if (((ResTreeNode*)(l->data))->id == win32_res->res_id) {
				res_node = (ResTreeNode*)l->data;
				break;
			}

		if (!res_node) {
			res_node = g_new0 (ResTreeNode, 1);
			res_node->id = win32_res->res_id;
			type_node->children = g_slist_append (type_node->children, res_node);
		}

		res_node->children = g_slist_append (res_node->children, lang_node);
	}

	return tree;
}

/*
 * resource_tree_encode:
 *
 *   Encode the resource tree into the format used in the PE file.
 */
static void
resource_tree_encode (ResTreeNode *node, char *begin, char *p, char **endbuf)
{
	char *entries;
	MonoPEResourceDir dir;
	MonoPEResourceDirEntry dir_entry;
	MonoPEResourceDataEntry data_entry;
	GSList *l;
	guint32 res_id_entries;

	/*
	 * For the format of the resource directory, see the article
	 * "An In-Depth Look into the Win32 Portable Executable File Format" by
	 * Matt Pietrek
	 */

	memset (&dir, 0, sizeof (dir));
	memset (&dir_entry, 0, sizeof (dir_entry));
	memset (&data_entry, 0, sizeof (data_entry));

	/* the on-disk layout depends on these exact struct sizes (no padding) */
	g_assert (sizeof (dir) == 16);
	g_assert (sizeof (dir_entry) == 8);
	g_assert (sizeof (data_entry) == 16);

	node->offset = p - begin;

	/* IMAGE_RESOURCE_DIRECTORY */
	res_id_entries = g_slist_length (node->children);
	dir.res_id_entries = GUINT16_TO_LE (res_id_entries);

	memcpy (p, &dir, sizeof (dir));
	p += sizeof (dir);

	/* Reserve space for entries */
	entries = p;
	p += sizeof (dir_entry) * res_id_entries;

	/* Write children */
	for (l = node->children; l; l = l->next) {
		ResTreeNode *child = (ResTreeNode*)l->data;

		if (child->win32_res) {
			guint32 size;

			child->offset = p - begin;

			/* IMAGE_RESOURCE_DATA_ENTRY */
			data_entry.rde_data_offset = GUINT32_TO_LE (p - begin + sizeof (data_entry));
			size = mono_array_length (child->win32_res->res_data);
			data_entry.rde_size = GUINT32_TO_LE (size);

			memcpy (p, &data_entry, sizeof (data_entry));
			p += sizeof (data_entry);

			memcpy (p, mono_array_addr (child->win32_res->res_data, char, 0), size);
			p += size;
		} else {
			/* subdirectory: recurse; &p advances past the child's encoding */
			resource_tree_encode (child, begin, p, &p);
		}
	}

	/* IMAGE_RESOURCE_ENTRY */
	/* Now that child offsets are known, back-fill the reserved entry slots. */
	for (l = node->children; l; l = l->next) {
		ResTreeNode *child = (ResTreeNode*)l->data;

		MONO_PE_RES_DIR_ENTRY_SET_NAME (dir_entry, FALSE, child->id);
		MONO_PE_RES_DIR_ENTRY_SET_DIR (dir_entry, !child->win32_res, child->offset);

		memcpy (entries, &dir_entry, sizeof (dir_entry));
		entries += sizeof (dir_entry);
	}

	*endbuf = p;
}

/* Recursively free a resource tree built by resource_tree_create. */
static void
resource_tree_free (ResTreeNode * node)
{
	GSList * list;
	for (list = node->children; list; list = list->next)
		resource_tree_free ((ResTreeNode*)list->data);
	g_slist_free(node->children);
	g_free (node);
}

/*
 * assembly_add_win32_resources:
 *
 * Build the resource tree from the builder's win32_resources array, encode
 * it into a temporary buffer, and store the trimmed copy in
 * assembly->win32_res / win32_res_size.
 */
static void
assembly_add_win32_resources (MonoDynamicImage *assembly, MonoReflectionAssemblyBuilder *assemblyb)
{
	char *buf;
	char *p;
	guint32 size, i;
	MonoReflectionWin32Resource *win32_res;
	ResTreeNode *tree;

	if (!assemblyb->win32_resources)
		return;

	/*
	 * Resources are stored in a three level tree inside the PE file.
	 * - level one contains a node for each type of resource
	 * - level two contains a node for each resource
	 * - level three contains a node for each instance of a resource for a
	 *   specific language.
	 */

	tree = resource_tree_create (assemblyb->win32_resources);

	/* Estimate the size of the encoded tree */
	size = 0;
	for (i = 0; i < mono_array_length (assemblyb->win32_resources); ++i) {
		win32_res = (MonoReflectionWin32Resource*)mono_array_addr (assemblyb->win32_resources, MonoReflectionWin32Resource, i);
		size += mono_array_length (win32_res->res_data);
	}

	/* Directory structure */
	/* NOTE(review): 256 bytes of directory overhead per resource is a
	 * heuristic upper bound; the g_assert below is the safety net. */
	size += mono_array_length (assemblyb->win32_resources) * 256;
	p = buf = g_malloc (size);

	resource_tree_encode (tree, p, p, &p);

	g_assert (p - buf <= size);

	assembly->win32_res = g_malloc (p - buf);
	assembly->win32_res_size = p - buf;
	memcpy (assembly->win32_res, buf, p - buf);

	g_free (buf);
	resource_tree_free (tree);
}

/*
 * fixup_resource_directory:
 *
 * Recursively rebase the rde_data_offset fields of every
 * IMAGE_RESOURCE_DATA_ENTRY in the encoded tree by the section RVA.
 */
static void
fixup_resource_directory (char *res_section, char *p, guint32 rva)
{
	MonoPEResourceDir *dir = (MonoPEResourceDir*)p;
	int i;

	p += sizeof (MonoPEResourceDir);
	for (i = 0; i < GUINT16_FROM_LE (dir->res_named_entries) + GUINT16_FROM_LE (dir->res_id_entries); ++i) {
		MonoPEResourceDirEntry *dir_entry = (MonoPEResourceDirEntry*)p;
		char *child = res_section + MONO_PE_RES_DIR_ENTRY_DIR_OFFSET (*dir_entry);
		if (MONO_PE_RES_DIR_ENTRY_IS_DIR (*dir_entry)) {
			fixup_resource_directory (res_section, child, rva);
		} else {
			MonoPEResourceDataEntry *data_entry = (MonoPEResourceDataEntry*)child;
			data_entry->rde_data_offset = GUINT32_TO_LE (GUINT32_FROM_LE (data_entry->rde_data_offset) +
rva);
		}

		p += sizeof (MonoPEResourceDirEntry);
	}
}

/* Write NUMBYTES from BUFFER to F, aborting the process on any failure. */
static void
checked_write_file (HANDLE f, gconstpointer buffer, guint32 numbytes)
{
	guint32 dummy;
	if (!WriteFile (f, buffer, numbytes, &dummy, NULL))
		g_error ("WriteFile returned %d\n", GetLastError ());
}

/*
 * mono_image_create_pefile:
 * @mb: a module builder object
 *
 * This function creates the PE-COFF header, the image sections, the CLI header  etc.
 * all the data is written in assembly->pefile where it can be easily retrieved later in chunks.
 */
void
mono_image_create_pefile (MonoReflectionModuleBuilder *mb, HANDLE file)
{
	MonoMSDOSHeader *msdos;
	MonoDotNetHeader *header;
	MonoSectionTable *section;
	MonoCLIHeader *cli_header;
	guint32 size, image_size, virtual_base, text_offset;
	guint32 header_start, section_start, file_offset, virtual_offset;
	MonoDynamicImage *assembly;
	MonoReflectionAssemblyBuilder *assemblyb;
	MonoDynamicStream pefile_stream = {0};
	MonoDynamicStream *pefile = &pefile_stream;
	int i, nsections;
	guint32 *rva, value;
	guchar *p;
	/* canonical MS-DOS header + "This program cannot be run in DOS mode." stub */
	static const unsigned char msheader[] = {
		0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
		0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
		0x0e, 0x1f, 0xba, 0x0e, 0x00, 0xb4, 0x09, 0xcd, 0x21, 0xb8, 0x01, 0x4c, 0xcd, 0x21, 0x54, 0x68,
		0x69, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x20, 0x63, 0x61, 0x6e, 0x6e, 0x6f,
		0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x75, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x44, 0x4f, 0x53, 0x20,
		0x6d, 0x6f, 0x64, 0x65, 0x2e, 0x0d, 0x0d, 0x0a, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	assemblyb = mb->assemblyb;

	mono_image_basic_init (assemblyb);
	assembly = mb->dynamic_image;

	assembly->pe_kind = assemblyb->pe_kind;
	assembly->machine = assemblyb->machine;
	((MonoDynamicImage*)assemblyb->dynamic_assembly->assembly.image)->pe_kind = assemblyb->pe_kind;
	((MonoDynamicImage*)assemblyb->dynamic_assembly->assembly.image)->machine = assemblyb->machine;

	mono_image_build_metadata (mb);

	/* assembly-level resources are only emitted into the main module */
	if (mb->is_main && assemblyb->resources) {
		int len = mono_array_length (assemblyb->resources);
		for (i = 0; i < len; ++i)
			assembly_add_resource (mb, assembly, (MonoReflectionResource*)mono_array_addr (assemblyb->resources, MonoReflectionResource, i));
	}

	if (mb->resources) {
		int len = mono_array_length (mb->resources);
		for (i = 0; i < len; ++i)
			assembly_add_resource (mb, assembly, (MonoReflectionResource*)mono_array_addr (mb->resources, MonoReflectionResource, i));
	}

	build_compressed_metadata (assembly);

	if (mb->is_main)
		assembly_add_win32_resources (assembly, assemblyb);

	nsections = calc_section_size (assembly);

	/* The DOS header and stub */
	g_assert (sizeof (MonoMSDOSHeader) == sizeof (msheader));
	mono_image_add_stream_data (pefile, (char*)msheader, sizeof (msheader));

	/* the dotnet header */
	header_start = mono_image_add_stream_zero (pefile, sizeof (MonoDotNetHeader));

	/* the section tables */
	section_start = mono_image_add_stream_zero (pefile, sizeof (MonoSectionTable) * nsections);

	file_offset = section_start + sizeof (MonoSectionTable) * nsections;
	virtual_offset = VIRT_ALIGN;
	image_size = 0;

	/* lay out each non-empty section: file offsets aligned to FILE_ALIGN,
	 * RVAs aligned to VIRT_ALIGN */
	for (i = 0; i < MONO_SECTION_MAX; ++i) {
		if (!assembly->sections [i].size)
			continue;
		/* align offsets */
		file_offset += FILE_ALIGN - 1;
		file_offset &= ~(FILE_ALIGN - 1);
		virtual_offset += VIRT_ALIGN - 1;
		virtual_offset &= ~(VIRT_ALIGN - 1);

		assembly->sections [i].offset = file_offset;
		assembly->sections [i].rva = virtual_offset;

		file_offset += assembly->sections [i].size;
		virtual_offset += assembly->sections [i].size;
		image_size += (assembly->sections [i].size + VIRT_ALIGN - 1) & ~(VIRT_ALIGN - 1);
	}

	file_offset += FILE_ALIGN - 1;
	file_offset &= ~(FILE_ALIGN - 1);

	image_size += section_start + sizeof (MonoSectionTable) * nsections;

	/* back-patch info */
	msdos = (MonoMSDOSHeader*)pefile->data;
	msdos->pe_offset = GUINT32_FROM_LE (sizeof (MonoMSDOSHeader));

	header = (MonoDotNetHeader*)(pefile->data + header_start);
	header->pesig [0] = 'P';
	header->pesig [1] = 'E';

	header->coff.coff_machine = GUINT16_FROM_LE (assemblyb->machine);
	header->coff.coff_sections = GUINT16_FROM_LE (nsections);
	header->coff.coff_time = GUINT32_FROM_LE (time (NULL));
	header->coff.coff_opt_header_size = GUINT16_FROM_LE (sizeof (MonoDotNetHeader) - sizeof (MonoCOFFHeader) - 4);
	if (assemblyb->pekind == 1) {
		/* it's a dll */
		header->coff.coff_attributes = GUINT16_FROM_LE (0x210e);
	} else {
		/* it's an exe */
		header->coff.coff_attributes = GUINT16_FROM_LE (0x010e);
	}

	virtual_base = 0x400000; /* FIXME: 0x10000000 if a DLL */

	header->pe.pe_magic = GUINT16_FROM_LE (0x10B);
	header->pe.pe_major = 6;
	header->pe.pe_minor = 0;
	size = assembly->sections [MONO_SECTION_TEXT].size;
	size += FILE_ALIGN - 1;
	size &= ~(FILE_ALIGN - 1);
	header->pe.pe_code_size = GUINT32_FROM_LE(size);
	size = assembly->sections [MONO_SECTION_RSRC].size;
	size += FILE_ALIGN - 1;
	size &= ~(FILE_ALIGN - 1);
	header->pe.pe_data_size = GUINT32_FROM_LE(size);
	g_assert (START_TEXT_RVA == assembly->sections [MONO_SECTION_TEXT].rva);
	header->pe.pe_rva_code_base = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_TEXT].rva);
	header->pe.pe_rva_data_base = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].rva);
	/* pe_rva_entry_point always at the beginning of the text section */
	header->pe.pe_rva_entry_point = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_TEXT].rva);

	header->nt.pe_image_base = GUINT32_FROM_LE (virtual_base);
	header->nt.pe_section_align = GUINT32_FROM_LE (VIRT_ALIGN);
	header->nt.pe_file_alignment = GUINT32_FROM_LE (FILE_ALIGN);
	header->nt.pe_os_major = GUINT16_FROM_LE (4);
	header->nt.pe_os_minor = GUINT16_FROM_LE (0);
	header->nt.pe_subsys_major = GUINT16_FROM_LE (4);
	size = section_start;
	size += FILE_ALIGN - 1;
	size &= ~(FILE_ALIGN - 1);
	header->nt.pe_header_size =
GUINT32_FROM_LE (size);
	size = image_size;
	size += VIRT_ALIGN - 1;
	size &= ~(VIRT_ALIGN - 1);
	header->nt.pe_image_size = GUINT32_FROM_LE (size);

	/*
	// Translate the PEFileKind value to the value expected by the Windows loader
	*/
	{
		short kind;

		/*
		// PEFileKinds.Dll == 1
		// PEFileKinds.ConsoleApplication == 2
		// PEFileKinds.WindowApplication == 3
		//
		// need to get:
		//     IMAGE_SUBSYSTEM_WINDOWS_GUI 2 // Image runs in the Windows GUI subsystem.
		//     IMAGE_SUBSYSTEM_WINDOWS_CUI 3 // Image runs in the Windows character subsystem.
		*/
		if (assemblyb->pekind == 3)
			kind = 2;
		else
			kind = 3;

		header->nt.pe_subsys_required = GUINT16_FROM_LE (kind);
	}

	header->nt.pe_stack_reserve = GUINT32_FROM_LE (0x00100000);
	header->nt.pe_stack_commit = GUINT32_FROM_LE (0x00001000);
	header->nt.pe_heap_reserve = GUINT32_FROM_LE (0x00100000);
	header->nt.pe_heap_commit = GUINT32_FROM_LE (0x00001000);
	header->nt.pe_loader_flags = GUINT32_FROM_LE (0);
	header->nt.pe_data_dir_count = GUINT32_FROM_LE (16);

	/* fill data directory entries */

	header->datadir.pe_resource_table.size = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].size);
	header->datadir.pe_resource_table.rva = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].rva);

	header->datadir.pe_reloc_table.size = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RELOC].size);
	header->datadir.pe_reloc_table.rva = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RELOC].rva);

	header->datadir.pe_cli_header.size = GUINT32_FROM_LE (72);
	header->datadir.pe_cli_header.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->cli_header_offset);
	header->datadir.pe_iat.size = GUINT32_FROM_LE (8);
	header->datadir.pe_iat.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->iat_offset);
	/* patch entrypoint name */
	if (assemblyb->pekind == 1)
		memcpy (assembly->code.data + assembly->imp_names_offset + 2, "_CorDllMain", 12);
	else
		memcpy (assembly->code.data + assembly->imp_names_offset + 2, "_CorExeMain", 12);
	/* patch imported function RVA name */
	rva = (guint32*)(assembly->code.data + assembly->iat_offset);
	*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->imp_names_offset);

	/* the import table */
	header->datadir.pe_import_table.size = GUINT32_FROM_LE (79); /* FIXME: magic number? */
	header->datadir.pe_import_table.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->idt_offset);
	/* patch imported dll RVA name and other entries in the dir */
	rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, name_rva));
	*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->imp_names_offset + 14); /* 14 is hint+strlen+1 of func name */
	rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, import_address_table_rva));
	*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->iat_offset);
	rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, import_lookup_table));
	*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->ilt_offset);

	/* store the ILT entry byte-by-byte: the stream buffer is not guaranteed
	 * to be 4-byte aligned here */
	p = (guchar*)(assembly->code.data + assembly->ilt_offset);
	value = (assembly->text_rva + assembly->imp_names_offset);
	*p++ = (value) & 0xff;
	*p++ = (value >> 8) & (0xff);
	*p++ = (value >> 16) & (0xff);
	*p++ = (value >> 24) & (0xff);

	/* the CLI header info */
	cli_header = (MonoCLIHeader*)(assembly->code.data + assembly->cli_header_offset);
	cli_header->ch_size = GUINT32_FROM_LE (72);
	cli_header->ch_runtime_major = GUINT16_FROM_LE (2);
	cli_header->ch_runtime_minor = GUINT16_FROM_LE (5);
	cli_header->ch_flags = GUINT32_FROM_LE (assemblyb->pe_kind);
	if (assemblyb->entry_point) {
		guint32 table_idx = 0;
		if (!strcmp (assemblyb->entry_point->object.vtable->klass->name, "MethodBuilder")) {
			MonoReflectionMethodBuilder *methodb = (MonoReflectionMethodBuilder*)assemblyb->entry_point;
			table_idx = methodb->table_idx;
		} else {
			table_idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, assemblyb->entry_point->method));
		}
		cli_header->ch_entry_point = GUINT32_FROM_LE (table_idx | MONO_TOKEN_METHOD_DEF);
	} else {
		cli_header->ch_entry_point = GUINT32_FROM_LE (0);
	}
	/* The embedded managed resources */
	text_offset = assembly->text_rva + assembly->code.index;
	cli_header->ch_resources.rva = GUINT32_FROM_LE (text_offset);
	cli_header->ch_resources.size = GUINT32_FROM_LE (assembly->resources.index);
	text_offset += assembly->resources.index;
	cli_header->ch_metadata.rva = GUINT32_FROM_LE (text_offset);
	cli_header->ch_metadata.size = GUINT32_FROM_LE (assembly->meta_size);
	text_offset += assembly->meta_size;
	if (assembly->strong_name_size) {
		cli_header->ch_strong_name.rva = GUINT32_FROM_LE (text_offset);
		cli_header->ch_strong_name.size = GUINT32_FROM_LE (assembly->strong_name_size);
		text_offset += assembly->strong_name_size;
	}

	/* write the section tables and section content */
	section = (MonoSectionTable*)(pefile->data + section_start);
	for (i = 0; i < MONO_SECTION_MAX; ++i) {
		static const char section_names [][7] = {
			".text", ".rsrc", ".reloc"
		};

		if (!assembly->sections [i].size)
			continue;

		strcpy (section->st_name, section_names [i]);
		/*g_print ("output section %s (%d), size: %d\n", section->st_name, i, assembly->sections [i].size);*/
		section->st_virtual_address = GUINT32_FROM_LE (assembly->sections [i].rva);
		section->st_virtual_size = GUINT32_FROM_LE (assembly->sections [i].size);
		section->st_raw_data_size = GUINT32_FROM_LE (GUINT32_TO_LE (section->st_virtual_size) + (FILE_ALIGN - 1));
		section->st_raw_data_size &= GUINT32_FROM_LE (~(FILE_ALIGN - 1));
		section->st_raw_data_ptr = GUINT32_FROM_LE (assembly->sections [i].offset);
		section->st_flags = GUINT32_FROM_LE (assembly->sections [i].attrs);
		section ++;
	}

	checked_write_file (file, pefile->data, pefile->index);

	mono_dynamic_stream_reset (pefile);

	for (i = 0; i < MONO_SECTION_MAX; ++i) {
		if (!assembly->sections [i].size)
			continue;

		if (SetFilePointer (file, assembly->sections [i].offset, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
			g_error ("SetFilePointer returned %d\n", GetLastError ());

		switch (i) {
		case MONO_SECTION_TEXT:
			/* patch entry point */
			p = (guchar*)(assembly->code.data + 2);
			value = (virtual_base + assembly->text_rva + assembly->iat_offset);
			*p++ = (value) & 0xff;
			*p++ = (value >> 8) & 0xff;
			*p++ = (value >> 16) & 0xff;
			*p++ = (value >> 24) & 0xff;

			checked_write_file (file, assembly->code.data, assembly->code.index);
			checked_write_file (file, assembly->resources.data, assembly->resources.index);
			checked_write_file (file, assembly->image.raw_metadata, assembly->meta_size);
			checked_write_file (file, assembly->strong_name, assembly->strong_name_size);

			g_free (assembly->image.raw_metadata);
			break;
		case MONO_SECTION_RELOC: {
			struct {
				guint32 page_rva;
				guint32 block_size;
				guint16 type_and_offset;
				guint16 term;
			} reloc;

			g_assert (sizeof (reloc) == 12);

			reloc.page_rva = GUINT32_FROM_LE (assembly->text_rva);
			reloc.block_size = GUINT32_FROM_LE (12);

			/*
			 * the entrypoint is always at the start of the text section
			 * 3 is IMAGE_REL_BASED_HIGHLOW
			 * 2 is patch_size_rva - text_rva
			 */
			reloc.type_and_offset = GUINT16_FROM_LE ((3 << 12) + (2));
			reloc.term = 0;

			checked_write_file (file, &reloc, sizeof (reloc));

			break;
		}
		case MONO_SECTION_RSRC:
			if (assembly->win32_res) {

				/* Fixup the offsets in the IMAGE_RESOURCE_DATA_ENTRY structures */
				fixup_resource_directory (assembly->win32_res, assembly->win32_res, assembly->sections [i].rva);
				checked_write_file (file, assembly->win32_res, assembly->win32_res_size);
			}
			break;
		default:
			g_assert_not_reached ();
		}
	}

	/* check that the file is properly padded */
	if (SetFilePointer (file, file_offset, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
		g_error ("SetFilePointer returned %d\n", GetLastError ());
	if (!
SetEndOfFile (file))
		g_error ("SetEndOfFile returned %d\n", GetLastError ());

	/* Reset the per-assembly dynamic streams now that their contents are on disk. */
	mono_dynamic_stream_reset (&assembly->code);
	mono_dynamic_stream_reset (&assembly->us);
	mono_dynamic_stream_reset (&assembly->blob);
	mono_dynamic_stream_reset (&assembly->guid);
	mono_dynamic_stream_reset (&assembly->sheap);

	/* The blob cache owns its duplicated keys: free them before dropping the table. */
	g_hash_table_foreach (assembly->blob_cache, (GHFunc)g_free, NULL);
	g_hash_table_destroy (assembly->blob_cache);
	assembly->blob_cache = NULL;
}

#else /* DISABLE_REFLECTION_EMIT_SAVE */

/* Stub kept so callers still link when PE-file emission is compiled out. */
void
mono_image_create_pefile (MonoReflectionModuleBuilder *mb, HANDLE file)
{
	g_assert_not_reached ();
}

#endif /* DISABLE_REFLECTION_EMIT_SAVE */

#ifndef DISABLE_REFLECTION_EMIT

/*
 * mono_image_load_module_dynamic:
 * @ab: a dynamic assembly builder
 * @fileName: path of the image to load
 *
 * Open the image at @fileName and append it to the module list of the
 * dynamic assembly @ab, then return the managed System.Reflection.Module
 * wrapper.  Raises FileNotFoundException (errno failure or unresolved
 * references) or BadImageFormatException on open failure.
 */
MonoReflectionModule *
mono_image_load_module_dynamic (MonoReflectionAssemblyBuilder *ab, MonoString *fileName)
{
	char *name;
	MonoImage *image;
	MonoImageOpenStatus status;
	MonoDynamicAssembly *assembly;
	guint32 module_count;
	MonoImage **new_modules;
	gboolean *new_modules_loaded;

	name = mono_string_to_utf8 (fileName);
	image = mono_image_open (name, &status);
	if (!image) {
		MonoException *exc;
		if (status == MONO_IMAGE_ERROR_ERRNO)
			exc = mono_get_exception_file_not_found (fileName);
		else
			exc = mono_get_exception_bad_image_format (name);
		g_free (name);
		mono_raise_exception (exc);
	}
	g_free (name);

	assembly = ab->dynamic_assembly;
	image->assembly = (MonoAssembly*)assembly;

	/* Grow both parallel module arrays by one slot and append the new image. */
	module_count = image->assembly->image->module_count;
	new_modules = g_new0 (MonoImage *, module_count + 1);
	new_modules_loaded = g_new0 (gboolean, module_count + 1);

	if (image->assembly->image->modules)
		memcpy (new_modules, image->assembly->image->modules, module_count * sizeof (MonoImage *));
	if (image->assembly->image->modules_loaded)
		memcpy (new_modules_loaded, image->assembly->image->modules_loaded, module_count * sizeof (gboolean));
	new_modules [module_count] = image;
	new_modules_loaded [module_count] = TRUE;
	mono_image_addref (image);

	/* NOTE(review): only the old `modules` array is freed here; the old
	 * `modules_loaded` array appears to leak — confirm against upstream. */
	g_free (image->assembly->image->modules);
	image->assembly->image->modules = new_modules;
	image->assembly->image->modules_loaded = new_modules_loaded;
	image->assembly->image->module_count ++;

	mono_assembly_load_references (image, &status);
	if (status) {
		mono_image_close (image);
		mono_raise_exception (mono_get_exception_file_not_found (fileName));
	}

	return mono_module_get_object (mono_domain_get (), image);
}

#endif /* DISABLE_REFLECTION_EMIT */

/*
 * We need to return always the same object for MethodInfo, FieldInfo etc..
 * but we need to consider the reflected type.
 * type uses a different hash, since it uses custom hash/equal functions.
 */

/* Key for the per-domain cache mapping (runtime item, reflected class)
 * to its managed reflection wrapper. */
typedef struct {
	gpointer item;
	MonoClass *refclass;
} ReflectedEntry;

/* Two cache entries are equal when both the runtime item and the
 * reflecting class match. */
static gboolean
reflected_equal (gconstpointer a, gconstpointer b) {
	const ReflectedEntry *ea = a;
	const ReflectedEntry *eb = b;

	return (ea->item == eb->item) && (ea->refclass == eb->refclass);
}

/* Hash only on the item address; refclass collisions are resolved by
 * reflected_equal. */
static guint
reflected_hash (gconstpointer a) {
	const ReflectedEntry *ea = a;
	return mono_aligned_addr_hash (ea->item);
}

/* Return the cached wrapper of type t for (p, k) from the current
 * domain's refobject hash, if present.  Expects a `domain` local in
 * the enclosing function; takes and releases the domain lock. */
#define CHECK_OBJECT(t,p,k)	\
	do {	\
		t _obj;	\
		ReflectedEntry e; 	\
		e.item = (p);	\
		e.refclass = (k);	\
		mono_domain_lock (domain);	\
		if (!domain->refobject_hash)	\
			domain->refobject_hash = mono_g_hash_table_new_type (reflected_hash, reflected_equal, MONO_HASH_VALUE_GC);	\
		if ((_obj = mono_g_hash_table_lookup (domain->refobject_hash, &e))) {	\
			mono_domain_unlock (domain);	\
			return _obj;	\
		}	\
		mono_domain_unlock (domain);	\
	} while (0)

#ifdef HAVE_BOEHM_GC
/* ReflectedEntry doesn't need to be GC tracked */
#define ALLOC_REFENTRY g_new0 (ReflectedEntry, 1)
#define FREE_REFENTRY(entry) g_free ((entry))
#define REFENTRY_REQUIRES_CLEANUP
#else
#define ALLOC_REFENTRY mono_mempool_alloc (domain->mp, sizeof (ReflectedEntry))
/* FIXME: */
#define FREE_REFENTRY(entry)
#endif

/* Insert o as the cached wrapper for (p, k) and return it, or return the
 * wrapper that another thread inserted first.  Caller-visible contract:
 * this macro `return`s from the enclosing function. */
#define CACHE_OBJECT(t,p,o,k)	\
	do {	\
		t _obj;	\
		ReflectedEntry pe;	\
		pe.item = (p);	\
		pe.refclass = (k);	\
		mono_domain_lock (domain);	\
		if (!domain->refobject_hash)	\
			domain->refobject_hash = mono_g_hash_table_new_type (reflected_hash, reflected_equal, MONO_HASH_VALUE_GC);	\
_obj = mono_g_hash_table_lookup (domain->refobject_hash, &pe); \ if (!_obj) { \ ReflectedEntry *e = ALLOC_REFENTRY; \ e->item = (p); \ e->refclass = (k); \ mono_g_hash_table_insert (domain->refobject_hash, e,o); \ _obj = o; \ } \ mono_domain_unlock (domain); \ return _obj; \ } while (0) static void clear_cached_object (MonoDomain *domain, gpointer o, MonoClass *klass) { mono_domain_lock (domain); if (domain->refobject_hash) { ReflectedEntry pe; gpointer orig_pe, orig_value; pe.item = o; pe.refclass = klass; if (mono_g_hash_table_lookup_extended (domain->refobject_hash, &pe, &orig_pe, &orig_value)) { mono_g_hash_table_remove (domain->refobject_hash, &pe); FREE_REFENTRY (orig_pe); } } mono_domain_unlock (domain); } #ifdef REFENTRY_REQUIRES_CLEANUP static void cleanup_refobject_hash (gpointer key, gpointer value, gpointer user_data) { FREE_REFENTRY (key); } #endif void mono_reflection_cleanup_domain (MonoDomain *domain) { if (domain->refobject_hash) { /*let's avoid scanning the whole hashtable if not needed*/ #ifdef REFENTRY_REQUIRES_CLEANUP mono_g_hash_table_foreach (domain->refobject_hash, cleanup_refobject_hash, NULL); #endif mono_g_hash_table_destroy (domain->refobject_hash); domain->refobject_hash = NULL; } } #ifndef DISABLE_REFLECTION_EMIT static gpointer register_assembly (MonoDomain *domain, MonoReflectionAssembly *res, MonoAssembly *assembly) { CACHE_OBJECT (MonoReflectionAssembly *, assembly, res, NULL); } static gpointer register_module (MonoDomain *domain, MonoReflectionModuleBuilder *res, MonoDynamicImage *module) { CACHE_OBJECT (MonoReflectionModuleBuilder *, module, res, NULL); } void mono_image_module_basic_init (MonoReflectionModuleBuilder *moduleb) { MonoDynamicImage *image = moduleb->dynamic_image; MonoReflectionAssemblyBuilder *ab = moduleb->assemblyb; if (!image) { MonoError error; int module_count; MonoImage **new_modules; MonoImage *ass; char *name, *fqname; /* * FIXME: we already created an image in mono_image_basic_init (), but * we don't know 
which module it belongs to, since that is only * determined at assembly save time. */ /*image = (MonoDynamicImage*)ab->dynamic_assembly->assembly.image; */ name = mono_string_to_utf8 (ab->name); fqname = mono_string_to_utf8_checked (moduleb->module.fqname, &error); if (!mono_error_ok (&error)) { g_free (name); mono_error_raise_exception (&error); } image = create_dynamic_mono_image (ab->dynamic_assembly, name, fqname); moduleb->module.image = &image->image; moduleb->dynamic_image = image; register_module (mono_object_domain (moduleb), moduleb, image); /* register the module with the assembly */ ass = ab->dynamic_assembly->assembly.image; module_count = ass->module_count; new_modules = g_new0 (MonoImage *, module_count + 1); if (ass->modules) memcpy (new_modules, ass->modules, module_count * sizeof (MonoImage *)); new_modules [module_count] = &image->image; mono_image_addref (&image->image); g_free (ass->modules); ass->modules = new_modules; ass->module_count ++; } } void mono_image_set_wrappers_type (MonoReflectionModuleBuilder *moduleb, MonoReflectionType *type) { MonoDynamicImage *image = moduleb->dynamic_image; g_assert (type->type); image->wrappers_type = mono_class_from_mono_type (type->type); } #endif /* * mono_assembly_get_object: * @domain: an app domain * @assembly: an assembly * * Return an System.Reflection.Assembly object representing the MonoAssembly @assembly. 
*/ MonoReflectionAssembly* mono_assembly_get_object (MonoDomain *domain, MonoAssembly *assembly) { static MonoClass *assembly_type; MonoReflectionAssembly *res; CHECK_OBJECT (MonoReflectionAssembly *, assembly, NULL); if (!assembly_type) { MonoClass *class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoAssembly"); if (class == NULL) class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Assembly"); g_assert (class); assembly_type = class; } res = (MonoReflectionAssembly *)mono_object_new (domain, assembly_type); res->assembly = assembly; CACHE_OBJECT (MonoReflectionAssembly *, assembly, res, NULL); } MonoReflectionModule* mono_module_get_object (MonoDomain *domain, MonoImage *image) { static MonoClass *module_type; MonoReflectionModule *res; char* basename; CHECK_OBJECT (MonoReflectionModule *, image, NULL); if (!module_type) { MonoClass *class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoModule"); if (class == NULL) class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Module"); g_assert (class); module_type = class; } res = (MonoReflectionModule *)mono_object_new (domain, module_type); res->image = image; MONO_OBJECT_SETREF (res, assembly, (MonoReflectionAssembly *) mono_assembly_get_object(domain, image->assembly)); MONO_OBJECT_SETREF (res, fqname, mono_string_new (domain, image->name)); basename = g_path_get_basename (image->name); MONO_OBJECT_SETREF (res, name, mono_string_new (domain, basename)); MONO_OBJECT_SETREF (res, scopename, mono_string_new (domain, image->module_name)); g_free (basename); if (image->assembly->image == image) { res->token = mono_metadata_make_token (MONO_TABLE_MODULE, 1); } else { int i; res->token = 0; if (image->assembly->image->modules) { for (i = 0; i < image->assembly->image->module_count; i++) { if (image->assembly->image->modules [i] == image) res->token = mono_metadata_make_token (MONO_TABLE_MODULEREF, i + 1); } g_assert (res->token); } } 
CACHE_OBJECT (MonoReflectionModule *, image, res, NULL); } MonoReflectionModule* mono_module_file_get_object (MonoDomain *domain, MonoImage *image, int table_index) { static MonoClass *module_type; MonoReflectionModule *res; MonoTableInfo *table; guint32 cols [MONO_FILE_SIZE]; const char *name; guint32 i, name_idx; const char *val; if (!module_type) { MonoClass *class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoModule"); if (class == NULL) class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Module"); g_assert (class); module_type = class; } res = (MonoReflectionModule *)mono_object_new (domain, module_type); table = &image->tables [MONO_TABLE_FILE]; g_assert (table_index < table->rows); mono_metadata_decode_row (table, table_index, cols, MONO_FILE_SIZE); res->image = NULL; MONO_OBJECT_SETREF (res, assembly, (MonoReflectionAssembly *) mono_assembly_get_object(domain, image->assembly)); name = mono_metadata_string_heap (image, cols [MONO_FILE_NAME]); /* Check whenever the row has a corresponding row in the moduleref table */ table = &image->tables [MONO_TABLE_MODULEREF]; for (i = 0; i < table->rows; ++i) { name_idx = mono_metadata_decode_row_col (table, i, MONO_MODULEREF_NAME); val = mono_metadata_string_heap (image, name_idx); if (strcmp (val, name) == 0) res->image = image->modules [i]; } MONO_OBJECT_SETREF (res, fqname, mono_string_new (domain, name)); MONO_OBJECT_SETREF (res, name, mono_string_new (domain, name)); MONO_OBJECT_SETREF (res, scopename, mono_string_new (domain, name)); res->is_resource = cols [MONO_FILE_FLAGS] && FILE_CONTAINS_NO_METADATA; res->token = mono_metadata_make_token (MONO_TABLE_FILE, table_index + 1); return res; } static gboolean mymono_metadata_type_equal (MonoType *t1, MonoType *t2) { if ((t1->type != t2->type) || (t1->byref != t2->byref)) return FALSE; switch (t1->type) { case MONO_TYPE_VOID: case MONO_TYPE_BOOLEAN: case MONO_TYPE_CHAR: case MONO_TYPE_I1: case MONO_TYPE_U1: case 
MONO_TYPE_I2:
	case MONO_TYPE_U2:
	case MONO_TYPE_I4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I8:
	case MONO_TYPE_U8:
	case MONO_TYPE_R4:
	case MONO_TYPE_R8:
	case MONO_TYPE_STRING:
	case MONO_TYPE_I:
	case MONO_TYPE_U:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_TYPEDBYREF:
		/* Primitive/built-in types: equal element kind is enough. */
		return TRUE;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
		return t1->data.klass == t2->data.klass;
	case MONO_TYPE_PTR:
		return mymono_metadata_type_equal (t1->data.type, t2->data.type);
	case MONO_TYPE_ARRAY:
		if (t1->data.array->rank != t2->data.array->rank)
			return FALSE;
		return t1->data.array->eklass == t2->data.array->eklass;
	case MONO_TYPE_GENERICINST: {
		int i;
		MonoGenericInst *i1 = t1->data.generic_class->context.class_inst;
		MonoGenericInst *i2 = t2->data.generic_class->context.class_inst;
		if (i1->type_argc != i2->type_argc)
			return FALSE;
		if (!mono_metadata_type_equal (&t1->data.generic_class->container_class->byval_arg,
					       &t2->data.generic_class->container_class->byval_arg))
			return FALSE;
		/* FIXME: we should probably just compare the instance pointers directly.
MonoGenericClass *gclass; MonoGenericInst *ginst; MonoClass *gtd; MonoGenericContainer *gcontainer; MonoType **argv = NULL; gboolean is_denorm_gtd = TRUE, requires_rebind = FALSE; if (type->type != MONO_TYPE_GENERICINST) return type; gclass = type->data.generic_class; ginst = gclass->context.class_inst; if (!ginst->is_open) return type; gtd = gclass->container_class; gcontainer = gtd->generic_container; argv = g_newa (MonoType*, ginst->type_argc); for (i = 0; i < ginst->type_argc; ++i) { MonoType *t = ginst->type_argv [i], *norm; if (t->type != MONO_TYPE_VAR || t->data.generic_param->num != i || t->data.generic_param->owner != gcontainer) is_denorm_gtd = FALSE; norm = mono_type_normalize (t); argv [i] = norm; if (norm != t) requires_rebind = TRUE; } if (is_denorm_gtd) return type->byref == gtd->byval_arg.byref ? &gtd->byval_arg : &gtd->this_arg; if (requires_rebind) { MonoClass *klass = mono_class_bind_generic_parameters (gtd, ginst->type_argc, argv, gclass->is_dynamic); return type->byref == klass->byval_arg.byref ? &klass->byval_arg : &klass->this_arg; } return type; } /* * mono_type_get_object: * @domain: an app domain * @type: a type * * Return an System.MonoType object representing the type @type. */ MonoReflectionType* mono_type_get_object (MonoDomain *domain, MonoType *type) { MonoType *norm_type; MonoReflectionType *res; MonoClass *klass = mono_class_from_mono_type (type); /*we must avoid using @type as it might have come * from a mono_metadata_type_dup and the caller * expects that is can be freed. * Using the right type from */ type = klass->byval_arg.byref == type->byref ? &klass->byval_arg : &klass->this_arg; /* void is very common */ if (type->type == MONO_TYPE_VOID && domain->typeof_void) return (MonoReflectionType*)domain->typeof_void; /* * If the vtable of the given class was already created, we can use * the MonoType from there and avoid all locking and hash table lookups. 
 *
 * We cannot do this for TypeBuilders as mono_reflection_create_runtime_class expects
 * that the resulting object is different.
 */
	if (type == &klass->byval_arg && !klass->image->dynamic) {
		MonoVTable *vtable = mono_class_try_get_vtable (domain, klass);
		if (vtable && vtable->type)
			return vtable->type;
	}

	/* Lock order: loader lock first, then the domain lock. */
	mono_loader_lock (); /*FIXME mono_class_init and mono_class_vtable acquire it*/
	mono_domain_lock (domain);
	if (!domain->type_hash)
		domain->type_hash = mono_g_hash_table_new_type ((GHashFunc)mymono_metadata_type_hash,
				(GCompareFunc)mymono_metadata_type_equal, MONO_HASH_VALUE_GC);
	if ((res = mono_g_hash_table_lookup (domain->type_hash, type))) {
		mono_domain_unlock (domain);
		mono_loader_unlock ();
		return res;
	}
	/*Types must be normalized so a generic instance of the GTD get's the same inner type.
	 * For example in: Foo<A,B>; Bar<A> : Foo<A, Bar<A>>
	 * The second Bar will be encoded a generic instance of Bar with <A> as parameter.
	 * On all other places, Bar<A> will be encoded as the GTD itself. This is an implementation
	 * artifact of how generics are encoded and should be transparent to managed code so we
	 * need to weed out this diference when retrieving managed System.Type objects.
	 */
	norm_type = mono_type_normalize (type);
	if (norm_type != type) {
		res = mono_type_get_object (domain, norm_type);
		/* Cache the denormalized form under its own key too. */
		mono_g_hash_table_insert (domain->type_hash, type, res);
		mono_domain_unlock (domain);
		mono_loader_unlock ();
		return res;
	}

	/* This MonoGenericClass hack is no longer necessary.
	   Let's leave it here until we finish with the 2-stage type-builder setup.*/
	if ((type->type == MONO_TYPE_GENERICINST) && type->data.generic_class->is_dynamic && !type->data.generic_class->container_class->wastypebuilder)
		g_assert (0);

	if (!verify_safe_for_managed_space (type)) {
		mono_domain_unlock (domain);
		mono_loader_unlock ();
		mono_raise_exception (mono_get_exception_invalid_operation ("This type cannot be propagated to managed space"));
	}

	if (mono_class_get_ref_info (klass) && !klass->wastypebuilder) {
		gboolean is_type_done = TRUE;
		/* Generic parameters have reflection_info set but they are not finished together with their enclosing type.
		 * We must ensure that once a type is finished we don't return a GenericTypeParameterBuilder.
		 * We can't simply close the types as this will interfere with other parts of the generics machinery.
		 */
		if (klass->byval_arg.type == MONO_TYPE_MVAR || klass->byval_arg.type == MONO_TYPE_VAR) {
			MonoGenericParam *gparam = klass->byval_arg.data.generic_param;

			if (gparam->owner && gparam->owner->is_method) {
				MonoMethod *method = gparam->owner->owner.method;
				if (method && mono_class_get_generic_type_definition (method->klass)->wastypebuilder)
					is_type_done = FALSE;
			} else if (gparam->owner && !gparam->owner->is_method) {
				MonoClass *klass = gparam->owner->owner.klass;
				if (klass && mono_class_get_generic_type_definition (klass)->wastypebuilder)
					is_type_done = FALSE;
			}
		}

		/* g_assert_not_reached (); */
		/* should this be considered an error condition? */
		if (is_type_done && !type->byref) {
			mono_domain_unlock (domain);
			mono_loader_unlock ();
			return mono_class_get_ref_info (klass);
		}
	}
/* This is stored in vtables/JITted code so it has to be pinned under a moving GC. */
#ifdef HAVE_SGEN_GC
	res = (MonoReflectionType *)mono_gc_alloc_pinned_obj (mono_class_vtable (domain, mono_defaults.monotype_class), mono_class_instance_size (mono_defaults.monotype_class));
#else
	res = (MonoReflectionType *)mono_object_new (domain, mono_defaults.monotype_class);
#endif
	res->type = type;
	mono_g_hash_table_insert (domain->type_hash, type, res);

	if (type->type == MONO_TYPE_VOID)
		domain->typeof_void = (MonoObject*)res;

	mono_domain_unlock (domain);
	mono_loader_unlock ();
	return res;
}

/*
 * mono_method_get_object:
 * @domain: an app domain
 * @method: a method
 * @refclass: the reflected type (can be NULL)
 *
 * Return an System.Reflection.MonoMethod object representing the method @method.
 */
MonoReflectionMethod*
mono_method_get_object (MonoDomain *domain, MonoMethod *method, MonoClass *refclass)
{
	/*
	 * We use the same C representation for methods and constructors, but the type
	 * name in C# is different.
 */
	static MonoClass *System_Reflection_MonoMethod = NULL;
	static MonoClass *System_Reflection_MonoCMethod = NULL;
	static MonoClass *System_Reflection_MonoGenericMethod = NULL;
	static MonoClass *System_Reflection_MonoGenericCMethod = NULL;
	MonoClass *klass;
	MonoReflectionMethod *ret;

	/* Inflated (generic-instance) methods get the MonoGeneric(C)Method
	 * wrapper and are always cached against their own declaring class. */
	if (method->is_inflated) {
		MonoReflectionGenericMethod *gret;

		refclass = method->klass;
		CHECK_OBJECT (MonoReflectionMethod *, method, refclass);
		if ((*method->name == '.') && (!strcmp (method->name, ".ctor") || !strcmp (method->name, ".cctor"))) {
			if (!System_Reflection_MonoGenericCMethod)
				System_Reflection_MonoGenericCMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoGenericCMethod");
			klass = System_Reflection_MonoGenericCMethod;
		} else {
			if (!System_Reflection_MonoGenericMethod)
				System_Reflection_MonoGenericMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoGenericMethod");
			klass = System_Reflection_MonoGenericMethod;
		}
		gret = (MonoReflectionGenericMethod*)mono_object_new (domain, klass);
		gret->method.method = method;
		MONO_OBJECT_SETREF (gret, method.name, mono_string_new (domain, method->name));
		MONO_OBJECT_SETREF (gret, method.reftype, mono_type_get_object (domain, &refclass->byval_arg));
		CACHE_OBJECT (MonoReflectionMethod *, method, (MonoReflectionMethod*)gret, refclass);
	}

	if (!refclass)
		refclass = method->klass;

	CHECK_OBJECT (MonoReflectionMethod *, method, refclass);
	/* Constructors (.ctor/.cctor) use the MonoCMethod wrapper class. */
	if (*method->name == '.'
 && (strcmp (method->name, ".ctor") == 0 || strcmp (method->name, ".cctor") == 0)) {
		if (!System_Reflection_MonoCMethod)
			System_Reflection_MonoCMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoCMethod");
		klass = System_Reflection_MonoCMethod;
	}
	else {
		if (!System_Reflection_MonoMethod)
			System_Reflection_MonoMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoMethod");
		klass = System_Reflection_MonoMethod;
	}
	ret = (MonoReflectionMethod*)mono_object_new (domain, klass);
	ret->method = method;
	MONO_OBJECT_SETREF (ret, reftype, mono_type_get_object (domain, &refclass->byval_arg));
	CACHE_OBJECT (MonoReflectionMethod *, method, ret, refclass);
}

/*
 * mono_method_clear_object:
 *
 *   Clear the cached reflection objects for the dynamic method METHOD.
 */
void
mono_method_clear_object (MonoDomain *domain, MonoMethod *method)
{
	MonoClass *klass;
	g_assert (method->dynamic);

	/* Entries could be registered under any superclass of the reflected
	 * type, so walk the whole parent chain. */
	klass = method->klass;
	while (klass) {
		clear_cached_object (domain, method, klass);
		klass = klass->parent;
	}
	/* Added by mono_param_get_objects () */
	clear_cached_object (domain, &(method->signature), NULL);
	klass = method->klass;
	while (klass) {
		clear_cached_object (domain, &(method->signature), klass);
		klass = klass->parent;
	}
}

/*
 * mono_field_get_object:
 * @domain: an app domain
 * @klass: a type
 * @field: a field
 *
 * Return an System.Reflection.MonoField object representing the field @field
 * in class @klass.
*/ MonoReflectionField* mono_field_get_object (MonoDomain *domain, MonoClass *klass, MonoClassField *field) { MonoReflectionField *res; static MonoClass *monofield_klass; CHECK_OBJECT (MonoReflectionField *, field, klass); if (!monofield_klass) monofield_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoField"); res = (MonoReflectionField *)mono_object_new (domain, monofield_klass); res->klass = klass; res->field = field; MONO_OBJECT_SETREF (res, name, mono_string_new (domain, mono_field_get_name (field))); if (is_field_on_inst (field)) { res->attrs = get_field_on_inst_generic_type (field)->attrs; MONO_OBJECT_SETREF (res, type, mono_type_get_object (domain, field->type)); } else { if (field->type) MONO_OBJECT_SETREF (res, type, mono_type_get_object (domain, field->type)); res->attrs = mono_field_get_flags (field); } CACHE_OBJECT (MonoReflectionField *, field, res, klass); } /* * mono_property_get_object: * @domain: an app domain * @klass: a type * @property: a property * * Return an System.Reflection.MonoProperty object representing the property @property * in class @klass. */ MonoReflectionProperty* mono_property_get_object (MonoDomain *domain, MonoClass *klass, MonoProperty *property) { MonoReflectionProperty *res; static MonoClass *monoproperty_klass; CHECK_OBJECT (MonoReflectionProperty *, property, klass); if (!monoproperty_klass) monoproperty_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoProperty"); res = (MonoReflectionProperty *)mono_object_new (domain, monoproperty_klass); res->klass = klass; res->property = property; CACHE_OBJECT (MonoReflectionProperty *, property, res, klass); } /* * mono_event_get_object: * @domain: an app domain * @klass: a type * @event: a event * * Return an System.Reflection.MonoEvent object representing the event @event * in class @klass. 
*/ MonoReflectionEvent* mono_event_get_object (MonoDomain *domain, MonoClass *klass, MonoEvent *event) { MonoReflectionEvent *res; MonoReflectionMonoEvent *mono_event; static MonoClass *monoevent_klass; CHECK_OBJECT (MonoReflectionEvent *, event, klass); if (!monoevent_klass) monoevent_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoEvent"); mono_event = (MonoReflectionMonoEvent *)mono_object_new (domain, monoevent_klass); mono_event->klass = klass; mono_event->event = event; res = (MonoReflectionEvent*)mono_event; CACHE_OBJECT (MonoReflectionEvent *, event, res, klass); } /** * mono_get_reflection_missing_object: * @domain: Domain where the object lives * * Returns the System.Reflection.Missing.Value singleton object * (of type System.Reflection.Missing). * * Used as the value for ParameterInfo.DefaultValue when Optional * is present */ static MonoObject * mono_get_reflection_missing_object (MonoDomain *domain) { MonoObject *obj; static MonoClassField *missing_value_field = NULL; if (!missing_value_field) { MonoClass *missing_klass; missing_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Missing"); mono_class_init (missing_klass); missing_value_field = mono_class_get_field_from_name (missing_klass, "Value"); g_assert (missing_value_field); } obj = mono_field_get_value_object (domain, missing_value_field, NULL); g_assert (obj); return obj; } static MonoObject* get_dbnull (MonoDomain *domain, MonoObject **dbnull) { if (!*dbnull) *dbnull = mono_get_dbnull_object (domain); return *dbnull; } static MonoObject* get_reflection_missing (MonoDomain *domain, MonoObject **reflection_missing) { if (!*reflection_missing) *reflection_missing = mono_get_reflection_missing_object (domain); return *reflection_missing; } /* * mono_param_get_objects: * @domain: an app domain * @method: a method * * Return an System.Reflection.ParameterInfo array object representing the parameters * in the method @method. 
MonoArray*
mono_param_get_objects_internal (MonoDomain *domain, MonoMethod *method, MonoClass *refclass)
{
	static MonoClass *System_Reflection_ParameterInfo;
	static MonoClass *System_Reflection_ParameterInfo_array;
	MonoError error;
	MonoArray *res = NULL;
	MonoReflectionMethod *member = NULL;
	MonoReflectionParameter *param = NULL;
	char **names, **blobs = NULL;
	guint32 *types = NULL;
	MonoType *type = NULL;
	MonoObject *dbnull = NULL;
	MonoObject *missing = NULL;
	MonoMarshalSpec **mspecs;
	MonoMethodSignature *sig;
	MonoVTable *pinfo_vtable;
	int i;

	if (!System_Reflection_ParameterInfo_array) {
		MonoClass *klass;

		klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "ParameterInfo");
		/* publish fully-initialized class pointers before the array flag */
		mono_memory_barrier ();
		System_Reflection_ParameterInfo = klass;

		klass = mono_array_class_get (klass, 1);
		mono_memory_barrier ();
		System_Reflection_ParameterInfo_array = klass;
	}

	sig = mono_method_signature_checked (method, &error);
	if (!mono_error_ok (&error))
		mono_error_raise_exception (&error);

	if (!sig->param_count)
		return mono_array_new_specific (mono_class_vtable (domain, System_Reflection_ParameterInfo_array), 0);

	/* Note: the cache is based on the address of the signature into the method
	 * since we already cache MethodInfos with the method as keys.
	 */
	CHECK_OBJECT (MonoArray*, &(method->signature), refclass);

	member = mono_method_get_object (domain, method, refclass);
	names = g_new (char *, sig->param_count);
	mono_method_get_param_names (method, (const char **) names);

	/* mspecs has param_count + 1 entries; slot 0 is the return value. */
	mspecs = g_new (MonoMarshalSpec*, sig->param_count + 1);
	mono_method_get_marshal_info (method, mspecs);

	res = mono_array_new_specific (mono_class_vtable (domain, System_Reflection_ParameterInfo_array), sig->param_count);
	pinfo_vtable = mono_class_vtable (domain, System_Reflection_ParameterInfo);
	for (i = 0; i < sig->param_count; ++i) {
		param = (MonoReflectionParameter *)mono_object_new_specific (pinfo_vtable);
		MONO_OBJECT_SETREF (param, ClassImpl, mono_type_get_object (domain, sig->params [i]));
		MONO_OBJECT_SETREF (param, MemberImpl, (MonoObject*)member);
		MONO_OBJECT_SETREF (param, NameImpl, mono_string_new (domain, names [i]));
		param->PositionImpl = i;
		param->AttrsImpl = sig->params [i]->attrs;

		if (!(param->AttrsImpl & PARAM_ATTRIBUTE_HAS_DEFAULT)) {
			/* No metadata default: Missing.Value for optional params,
			 * DBNull.Value otherwise. */
			if (param->AttrsImpl & PARAM_ATTRIBUTE_OPTIONAL)
				MONO_OBJECT_SETREF (param, DefaultValueImpl, get_reflection_missing (domain, &missing));
			else
				MONO_OBJECT_SETREF (param, DefaultValueImpl, get_dbnull (domain, &dbnull));
		} else {

			/* Decode the default-value blobs lazily on first use. */
			if (!blobs) {
				blobs = g_new0 (char *, sig->param_count);
				types = g_new0 (guint32, sig->param_count);
				get_default_param_value_blobs (method, blobs, types);
			}

			/* Build MonoType for the type from the Constant Table */
			if (!type)
				type = g_new0 (MonoType, 1);
			type->type = types [i];
			type->data.klass = NULL;
			if (types [i] == MONO_TYPE_CLASS)
				type->data.klass = mono_defaults.object_class;
			else if ((sig->params [i]->type == MONO_TYPE_VALUETYPE) && sig->params [i]->data.klass->enumtype) {
				/* For enums, types [i] contains the base type */
				type->type = MONO_TYPE_VALUETYPE;
				type->data.klass = mono_class_from_mono_type (sig->params [i]);
			} else
				type->data.klass = mono_class_from_mono_type (type);

			MONO_OBJECT_SETREF (param, DefaultValueImpl, mono_get_object_from_blob (domain, type, blobs [i]));

			/* Type in the Constant table is MONO_TYPE_CLASS for nulls */
			if (types [i] != MONO_TYPE_CLASS && !param->DefaultValueImpl) {
				if (param->AttrsImpl & PARAM_ATTRIBUTE_OPTIONAL)
					MONO_OBJECT_SETREF (param, DefaultValueImpl, get_reflection_missing (domain, &missing));
				else
					MONO_OBJECT_SETREF (param, DefaultValueImpl, get_dbnull (domain, &dbnull));
			}
		}

		if (mspecs [i + 1])
			MONO_OBJECT_SETREF (param, MarshalAsImpl, (MonoObject*)mono_reflection_marshal_from_marshal_spec (domain, method->klass, mspecs [i + 1]));

		mono_array_setref (res, i, param);
	}
	g_free (names);
	g_free (blobs);
	g_free (types);
	g_free (type);

	/* i runs down to 0 to also free the return-value spec in slot 0. */
	for (i = mono_method_signature (method)->param_count; i >= 0; i--)
		if (mspecs [i])
			mono_metadata_free_marshal_spec (mspecs [i]);
	g_free (mspecs);

	CACHE_OBJECT (MonoArray *, &(method->signature), res, refclass);
}

/* Public wrapper: parameters reflected through the method's own class. */
MonoArray*
mono_param_get_objects (MonoDomain *domain, MonoMethod *method)
{
	return mono_param_get_objects_internal (domain, method, NULL);
}

/*
 * mono_method_body_get_object:
 * @domain: an app domain
 * @method: a method
 *
 * Return an System.Reflection.MethodBody object representing the method @method.
*/ MonoReflectionMethodBody* mono_method_body_get_object (MonoDomain *domain, MonoMethod *method) { static MonoClass *System_Reflection_MethodBody = NULL; static MonoClass *System_Reflection_LocalVariableInfo = NULL; static MonoClass *System_Reflection_ExceptionHandlingClause = NULL; MonoReflectionMethodBody *ret; MonoMethodHeader *header; MonoImage *image; guint32 method_rva, local_var_sig_token; char *ptr; unsigned char format, flags; int i; if (!System_Reflection_MethodBody) System_Reflection_MethodBody = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MethodBody"); if (!System_Reflection_LocalVariableInfo) System_Reflection_LocalVariableInfo = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "LocalVariableInfo"); if (!System_Reflection_ExceptionHandlingClause) System_Reflection_ExceptionHandlingClause = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "ExceptionHandlingClause"); CHECK_OBJECT (MonoReflectionMethodBody *, method, NULL); if ((method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) || (method->flags & METHOD_ATTRIBUTE_ABSTRACT) || (method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) || (method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) return NULL; image = method->klass->image; header = mono_method_get_header (method); if (!image->dynamic) { /* Obtain local vars signature token */ method_rva = mono_metadata_decode_row_col (&image->tables [MONO_TABLE_METHOD], mono_metadata_token_index (method->token) - 1, MONO_METHOD_RVA); ptr = mono_image_rva_map (image, method_rva); flags = *(const unsigned char *) ptr; format = flags & METHOD_HEADER_FORMAT_MASK; switch (format){ case METHOD_HEADER_TINY_FORMAT: local_var_sig_token = 0; break; case METHOD_HEADER_FAT_FORMAT: ptr += 2; ptr += 2; ptr += 4; local_var_sig_token = read32 (ptr); break; default: g_assert_not_reached (); } } else local_var_sig_token = 0; //FIXME ret = (MonoReflectionMethodBody*)mono_object_new (domain, System_Reflection_MethodBody); ret->init_locals 
= header->init_locals; ret->max_stack = header->max_stack; ret->local_var_sig_token = local_var_sig_token; MONO_OBJECT_SETREF (ret, il, mono_array_new_cached (domain, mono_defaults.byte_class, header->code_size)); memcpy (mono_array_addr (ret->il, guint8, 0), header->code, header->code_size); /* Locals */ MONO_OBJECT_SETREF (ret, locals, mono_array_new_cached (domain, System_Reflection_LocalVariableInfo, header->num_locals)); for (i = 0; i < header->num_locals; ++i) { MonoReflectionLocalVariableInfo *info = (MonoReflectionLocalVariableInfo*)mono_object_new (domain, System_Reflection_LocalVariableInfo); MONO_OBJECT_SETREF (info, local_type, mono_type_get_object (domain, header->locals [i])); info->is_pinned = header->locals [i]->pinned; info->local_index = i; mono_array_setref (ret->locals, i, info); } /* Exceptions */ MONO_OBJECT_SETREF (ret, clauses, mono_array_new_cached (domain, System_Reflection_ExceptionHandlingClause, header->num_clauses)); for (i = 0; i < header->num_clauses; ++i) { MonoReflectionExceptionHandlingClause *info = (MonoReflectionExceptionHandlingClause*)mono_object_new (domain, System_Reflection_ExceptionHandlingClause); MonoExceptionClause *clause = &header->clauses [i]; info->flags = clause->flags; info->try_offset = clause->try_offset; info->try_length = clause->try_len; info->handler_offset = clause->handler_offset; info->handler_length = clause->handler_len; if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER) info->filter_offset = clause->data.filter_offset; else if (clause->data.catch_class) MONO_OBJECT_SETREF (info, catch_type, mono_type_get_object (mono_domain_get (), &clause->data.catch_class->byval_arg)); mono_array_setref (ret->clauses, i, info); } mono_metadata_free_mh (header); CACHE_OBJECT (MonoReflectionMethodBody *, method, ret, NULL); return ret; } /** * mono_get_dbnull_object: * @domain: Domain where the object lives * * Returns the System.DBNull.Value singleton object * * Used as the value for ParameterInfo.DefaultValue */ 
MonoObject *
mono_get_dbnull_object (MonoDomain *domain)
{
	MonoObject *obj;
	static MonoClassField *dbnull_value_field = NULL;

	/* Resolve System.DBNull.Value once and cache the field handle */
	if (!dbnull_value_field) {
		MonoClass *dbnull_klass;
		dbnull_klass = mono_class_from_name (mono_defaults.corlib, "System", "DBNull");
		mono_class_init (dbnull_klass);
		dbnull_value_field = mono_class_get_field_from_name (dbnull_klass, "Value");
		g_assert (dbnull_value_field);
	}
	obj = mono_field_get_value_object (domain, dbnull_value_field, NULL);
	g_assert (obj);
	return obj;
}

/*
 * get_default_param_value_blobs:
 *
 * Fill BLOBS/TYPES (one entry per parameter of METHOD) with pointers into the
 * Constant-table blob heap and the constant's element type, for every parameter
 * that carries PARAM_ATTRIBUTE_HAS_DEFAULT. Entries without a default are left
 * untouched (callers are expected to pass zeroed arrays).
 */
static void
get_default_param_value_blobs (MonoMethod *method, char **blobs, guint32 *types)
{
	guint32 param_index, i, lastp, crow = 0;
	guint32 param_cols [MONO_PARAM_SIZE], const_cols [MONO_CONSTANT_SIZE];
	gint32 idx;

	MonoClass *klass = method->klass;
	MonoImage *image = klass->image;
	MonoMethodSignature *methodsig = mono_method_signature (method);

	MonoTableInfo *constt;
	MonoTableInfo *methodt;
	MonoTableInfo *paramt;

	if (!methodsig->param_count)
		return;

	mono_class_init (klass);

	/* Dynamic images keep defaults in a side table, not in metadata tables */
	if (klass->image->dynamic) {
		MonoReflectionMethodAux *aux;
		if (method->is_inflated)
			method = ((MonoMethodInflated*)method)->declaring;
		aux = g_hash_table_lookup (((MonoDynamicImage*)method->klass->image)->method_aux_hash, method);
		if (aux && aux->param_defaults) {
			/* index [0] is the return value slot, hence the +1 skip */
			memcpy (blobs, &(aux->param_defaults [1]), methodsig->param_count * sizeof (char*));
			memcpy (types, &(aux->param_default_types [1]), methodsig->param_count * sizeof (guint32));
		}
		return;
	}

	methodt = &klass->image->tables [MONO_TABLE_METHOD];
	paramt = &klass->image->tables [MONO_TABLE_PARAM];
	constt = &image->tables [MONO_TABLE_CONSTANT];

	idx = mono_method_get_index (method) - 1;
	g_assert (idx != -1);

	/* The method's Param rows are [param_index, lastp) in the Param table */
	param_index = mono_metadata_decode_row_col (methodt, idx, MONO_METHOD_PARAMLIST);
	if (idx + 1 < methodt->rows)
		lastp = mono_metadata_decode_row_col (methodt, idx + 1, MONO_METHOD_PARAMLIST);
	else
		lastp = paramt->rows + 1;

	for (i = param_index; i < lastp; ++i) {
		guint32 paramseq;

		mono_metadata_decode_row (paramt, i - 1, param_cols, MONO_PARAM_SIZE);
		paramseq = param_cols [MONO_PARAM_SEQUENCE];

		if (!(param_cols [MONO_PARAM_FLAGS] & PARAM_ATTRIBUTE_HAS_DEFAULT))
			continue;

		/* Find the Constant row attached to this Param row; resume the
		 * search from the previous hit since the table is sorted. */
		crow = mono_metadata_get_constant_index (image, MONO_TOKEN_PARAM_DEF | i, crow + 1);
		if (!crow) {
			continue;
		}

		mono_metadata_decode_row (constt, crow - 1, const_cols, MONO_CONSTANT_SIZE);
		/* paramseq is 1-based (0 is the return value) */
		blobs [paramseq - 1] = (gpointer) mono_metadata_blob_heap (image, const_cols [MONO_CONSTANT_VALUE]);
		types [paramseq - 1] = const_cols [MONO_CONSTANT_TYPE];
	}

	return;
}

/*
 * mono_get_object_from_blob:
 *
 * Decode a Constant-table blob into a boxed managed object of TYPE.
 * Returns NULL if BLOB is NULL or decoding fails.
 */
MonoObject *
mono_get_object_from_blob (MonoDomain *domain, MonoType *type, const char *blob)
{
	void *retval;
	MonoClass *klass;
	MonoObject *object;
	MonoType *basetype = type;

	if (!blob)
		return NULL;

	klass = mono_class_from_mono_type (type);
	if (klass->valuetype) {
		/* decode directly into the boxed object's payload */
		object = mono_object_new (domain, klass);
		retval = ((gchar *) object + sizeof (MonoObject));
		if (klass->enumtype)
			basetype = mono_class_enum_basetype (klass);
	} else {
		/* reference types: decode into the object pointer itself */
		retval = &object;
	}

	/* NOTE: mono_get_constant_value_from_blob returns 0 on success */
	if (!mono_get_constant_value_from_blob (domain, basetype->type, blob, retval))
		return object;
	else
		return NULL;
}

/*
 * assembly_name_to_aname:
 *
 * Parse a display-form assembly name ("Name, Version=..., Culture=...,
 * PublicKeyToken=...") into ASSEMBLY. P is parsed destructively in place
 * (separators are NUL-ed out), so ASSEMBLY's string fields point into P.
 * Returns 0 on success, non-zero on parse error (note: inverted vs. usual
 * boolean convention).
 */
static int
assembly_name_to_aname (MonoAssemblyName *assembly, char *p) {
	int found_sep;
	char *s;
	gboolean quoted = FALSE;

	memset (assembly, 0, sizeof (MonoAssemblyName));
	assembly->culture = "";
	memset (assembly->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH);

	if (*p == '"') {
		quoted = TRUE;
		p++;
	}
	assembly->name = p;
	while (*p && (isalnum (*p) || *p == '.' || *p == '-' || *p == '_' || *p == '$' || *p == '@' || g_ascii_isspace (*p)))
		p++;
	if (quoted) {
		if (*p != '"')
			return 1;
		*p = 0;
		p++;
	}
	if (*p != ',')
		return 1;
	*p = 0;
	/* Remove trailing whitespace */
	s = p - 1;
	while (*s && g_ascii_isspace (*s))
		*s-- = 0;
	p ++;
	while (g_ascii_isspace (*p))
		p++;

	/* Parse the comma-separated Key=Value properties */
	while (*p) {
		if (*p == 'V' && g_ascii_strncasecmp (p, "Version=", 8) == 0) {
			p += 8;
			assembly->major = strtoul (p, &s, 10);
			if (s == p || *s != '.')
				return 1;
			p = ++s;
			assembly->minor = strtoul (p, &s, 10);
			if (s == p || *s != '.')
				return 1;
			p = ++s;
			assembly->build = strtoul (p, &s, 10);
			if (s == p || *s != '.')
				return 1;
			p = ++s;
			assembly->revision = strtoul (p, &s, 10);
			if (s == p)
				return 1;
			p = s;
		} else if (*p == 'C' && g_ascii_strncasecmp (p, "Culture=", 8) == 0) {
			p += 8;
			if (g_ascii_strncasecmp (p, "neutral", 7) == 0) {
				assembly->culture = "";
				p += 7;
			} else {
				assembly->culture = p;
				while (*p && *p != ',') {
					p++;
				}
			}
		} else if (*p == 'P' && g_ascii_strncasecmp (p, "PublicKeyToken=", 15) == 0) {
			p += 15;
			if (strncmp (p, "null", 4) == 0) {
				p += 4;
			} else {
				int len;
				gchar *start = p;
				while (*p && *p != ',') {
					p++;
				}
				/* clamp to the fixed token buffer; g_strlcpy NUL-terminates */
				len = (p - start + 1);
				if (len > MONO_PUBLIC_KEY_TOKEN_LENGTH)
					len = MONO_PUBLIC_KEY_TOKEN_LENGTH;
				g_strlcpy ((char*)assembly->public_key_token, start, len);
			}
		} else {
			/* unknown property: skip to the next separator */
			while (*p && *p != ',')
				p++;
		}
		found_sep = 0;
		while (g_ascii_isspace (*p) || *p == ',') {
			*p++ = 0;
			found_sep = 1;
			continue;
		}
		/* failed */
		if (!found_sep)
			return 1;
	}

	return 0;
}

/*
 * mono_reflection_parse_type:
 * @name: type name
 *
 * Parse a type name as accepted by the GetType () method and output the info
 * extracted in the info structure.
 * the name param will be mangled, so, make a copy before passing it to this function.
 * The fields in info will be valid until the memory pointed to by name is valid.
 *
 * See also mono_type_get_name () below.
 *
 * Returns: 0 on parse error.
 */
static int
_mono_reflection_parse_type (char *name, char **endptr, gboolean is_recursed,
			     MonoTypeNameParse *info)
{
	char *start, *p, *w, *temp, *last_point, *startn;
	int in_modifiers = 0;
	int isbyref = 0, rank, arity = 0, i;

	start = p = w = name;

	//FIXME could we just zero the whole struct? memset (&info, 0, sizeof (MonoTypeNameParse))
	memset (&info->assembly, 0, sizeof (MonoAssemblyName));
	info->name = info->name_space = NULL;
	info->nested = NULL;
	info->modifiers = NULL;
	info->type_arguments = NULL;

	/* last_point separates the namespace from the name */
	last_point = NULL;
	/* Skips spaces */
	while (*p == ' ') p++, start++, w++, name++;

	/* First pass: scan namespace-qualified (possibly nested) type name */
	while (*p) {
		switch (*p) {
		case '+':
			*p = 0; /* NULL terminate the name */
			startn = p + 1;
			info->nested = g_list_append (info->nested, startn);
			/* we have parsed the nesting namespace + name */
			if (info->name)
				break;
			if (last_point) {
				info->name_space = start;
				*last_point = 0;
				info->name = last_point + 1;
			} else {
				info->name_space = (char *)"";
				info->name = start;
			}
			break;
		case '.':
			last_point = p;
			break;
		case '\\':
			++p;
			break;
		case '&':
		case '*':
		case '[':
		case ',':
		case ']':
			in_modifiers = 1;
			break;
		case '`':
			/* generic arity suffix, e.g. "List`1" */
			++p;
			i = strtol (p, &temp, 10);
			arity += i;
			if (p == temp)
				return 0;
			p = temp-1;
			break;
		default:
			break;
		}
		if (in_modifiers)
			break;
		// *w++ = *p++;
		p++;
	}

	if (!info->name) {
		if (last_point) {
			info->name_space = start;
			*last_point = 0;
			info->name = last_point + 1;
		} else {
			info->name_space = (char *)"";
			info->name = start;
		}
	}
	/* Second pass: byref/pointer/array modifiers, generic arguments,
	 * optional assembly qualifier */
	while (*p) {
		switch (*p) {
		case '&':
			if (isbyref) /* only one level allowed by the spec */
				return 0;
			isbyref = 1;
			info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (0));
			*p++ = 0;
			break;
		case '*':
			info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (-1));
			*p++ = 0;
			break;
		case '[':
			if (arity != 0) {
				/* '[' after an arity suffix opens a generic argument list */
				*p++ = 0;
				info->type_arguments = g_ptr_array_new ();
				for (i = 0; i < arity; i++) {
					MonoTypeNameParse *subinfo = g_new0 (MonoTypeNameParse, 1);
					gboolean fqname = FALSE;

					g_ptr_array_add (info->type_arguments, subinfo);

					if (*p == '[') {
						p++;
						fqname = TRUE;
					}

					if (!_mono_reflection_parse_type (p, &p, TRUE, subinfo))
						return 0;

					/*MS is lenient on [] delimited parameters that aren't fqn - and F# uses them.*/
					if (fqname && (*p != ']')) {
						char *aname;

						if (*p != ',')
							return 0;
						*p++ = 0;

						aname = p;
						while (*p && (*p != ']'))
							p++;

						if (*p != ']')
							return 0;

						*p++ = 0;
						while (*aname) {
							if (g_ascii_isspace (*aname)) {
								++aname;
								continue;
							}
							break;
						}

						if (!*aname ||
						    !assembly_name_to_aname (&subinfo->assembly, aname))
							return 0;
					} else if (fqname && (*p == ']')) {
						*p++ = 0;
					}

					if (i + 1 < arity) {
						if (*p != ',')
							return 0;
					} else {
						if (*p != ']')
							return 0;
					}
					*p++ = 0;
				}

				arity = 0;
				break;
			}
			/* plain array suffix: count ',' to get the rank */
			rank = 1;
			*p++ = 0;
			while (*p) {
				if (*p == ']')
					break;
				if (*p == ',')
					rank++;
				else if (*p == '*') /* '*' means unknown lower bound */
					info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (-2));
				else
					return 0;
				++p;
			}
			if (*p++ != ']')
				return 0;
			info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (rank));
			break;
		case ']':
			if (is_recursed)
				goto end;
			return 0;
		case ',':
			if (is_recursed)
				goto end;
			*p++ = 0;
			while (*p) {
				if (g_ascii_isspace (*p)) {
					++p;
					continue;
				}
				break;
			}
			if (!*p) /* missing assembly name */
				return 0;
			if (!assembly_name_to_aname (&info->assembly, p))
				return 0;
			break;
		default:
			return 0;
		}
		if (info->assembly.name)
			break;
	}
	// *w = 0; /* terminate class name */
 end:
	if (!info->name || !*info->name)
		return 0;
	if (endptr)
		*endptr = p;
	/* add other consistency checks */
	return 1;
}

int
mono_reflection_parse_type (char *name, MonoTypeNameParse *info)
{
	return _mono_reflection_parse_type (name, NULL, FALSE, info);
}

/*
 * _mono_reflection_get_type_from_info:
 *
 * Resolve INFO to a MonoType, loading the named assembly if INFO carries an
 * assembly qualifier, defaulting to IMAGE and finally corlib otherwise.
 */
static MonoType*
_mono_reflection_get_type_from_info (MonoTypeNameParse *info, MonoImage *image, gboolean ignorecase)
{
	gboolean type_resolve = FALSE;
	MonoType *type;
	MonoImage *rootimage = image;

	if (info->assembly.name) {
		MonoAssembly *assembly = mono_assembly_loaded (&info->assembly);
		if (!assembly && image && image->assembly && mono_assembly_names_equal (&info->assembly, &image->assembly->aname))
			/*
			 * This could happen in the AOT compiler case when the search hook is not
			 * installed.
			 */
			assembly = image->assembly;
		if (!assembly) {
			/* then we must load the assembly ourselve - see #60439 */
			assembly = mono_assembly_load (&info->assembly, NULL, NULL);
			if (!assembly)
				return NULL;
		}
		image = assembly->image;
	} else if (!image) {
		image = mono_defaults.corlib;
	}

	type = mono_reflection_get_type_with_rootimage (rootimage, image, info, ignorecase, &type_resolve);
	if (type == NULL && !info->assembly.name && image != mono_defaults.corlib) {
		/* unqualified name not found in IMAGE: retry against corlib */
		image = mono_defaults.corlib;
		type = mono_reflection_get_type_with_rootimage (rootimage, image, info, ignorecase, &type_resolve);
	}

	return type;
}

/*
 * mono_reflection_get_type_internal:
 *
 * Look INFO up in IMAGE: resolve the (possibly nested) class, instantiate
 * generic arguments, then apply byref/pointer/array modifiers in order.
 */
static MonoType*
mono_reflection_get_type_internal (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase)
{
	MonoClass *klass;
	GList *mod;
	int modval;
	gboolean bounded = FALSE;

	if (!image)
		image = mono_defaults.corlib;

	if (ignorecase)
		klass = mono_class_from_name_case (image, info->name_space, info->name);
	else
		klass = mono_class_from_name (image, info->name_space, info->name);
	if (!klass)
		return NULL;
	/* Walk down the '+'-separated nested-type chain */
	for (mod = info->nested; mod; mod = mod->next) {
		gpointer iter = NULL;
		MonoClass *parent;

		parent = klass;
		mono_class_init (parent);

		while ((klass = mono_class_get_nested_types (parent, &iter))) {
			if (ignorecase) {
				if (mono_utf8_strcasecmp (klass->name, mod->data) == 0)
					break;
			} else {
				if (strcmp (klass->name, mod->data) == 0)
					break;
			}
		}
		if (!klass)
			break;
	}
	if (!klass)
		return NULL;

	if (info->type_arguments) {
		MonoType **type_args = g_new0 (MonoType *, info->type_arguments->len);
		MonoReflectionType *the_type;
		MonoType *instance;
		int i;

		for (i = 0; i < info->type_arguments->len; i++) {
			MonoTypeNameParse *subinfo = g_ptr_array_index (info->type_arguments, i);

			type_args [i] = _mono_reflection_get_type_from_info (subinfo, rootimage, ignorecase);
			if (!type_args [i]) {
				g_free (type_args);
				return NULL;
			}
		}

		the_type = mono_type_get_object (mono_domain_get (), &klass->byval_arg);

		instance = mono_reflection_bind_generic_parameters (
			the_type, info->type_arguments->len, type_args);

		g_free (type_args);
		if (!instance)
			return NULL;

		klass = mono_class_from_mono_type (instance);
	}

	/* Modifier encoding: 0 = byref, -1 = pointer, -2 = bounded array
	 * marker, >0 = array rank (see _mono_reflection_parse_type) */
	for (mod = info->modifiers; mod; mod = mod->next) {
		modval = GPOINTER_TO_UINT (mod->data);
		if (!modval) { /* byref: must be last modifier */
			return &klass->this_arg;
		} else if (modval == -1) {
			klass = mono_ptr_class_get (&klass->byval_arg);
		} else if (modval == -2) {
			bounded = TRUE;
		} else { /* array rank */
			klass = mono_bounded_array_class_get (klass, modval, bounded);
		}
	}

	return &klass->byval_arg;
}

/*
 * mono_reflection_get_type:
 * @image: a metadata context
 * @info: type description structure
 * @ignorecase: flag for case-insensitive string compares
 * @type_resolve: whenever type resolve was already tried
 *
 * Build a MonoType from the type description in @info.
 * 
 */
MonoType*
mono_reflection_get_type (MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve) {
	return mono_reflection_get_type_with_rootimage(image, image, info, ignorecase, type_resolve);
}

/*
 * mono_reflection_get_type_internal_dynamic:
 *
 * Resolve INFO inside a dynamic (Reflection.Emit) assembly by probing each of
 * its built and loaded modules in turn.
 */
static MonoType*
mono_reflection_get_type_internal_dynamic (MonoImage *rootimage, MonoAssembly *assembly, MonoTypeNameParse *info, gboolean ignorecase)
{
	MonoReflectionAssemblyBuilder *abuilder;
	MonoType *type;
	int i;

	g_assert (assembly->dynamic);
	abuilder = (MonoReflectionAssemblyBuilder*)mono_assembly_get_object (((MonoDynamicAssembly*)assembly)->domain, assembly);

	/* Enumerate all modules */

	type = NULL;
	if (abuilder->modules) {
		for (i = 0; i < mono_array_length (abuilder->modules); ++i) {
			MonoReflectionModuleBuilder *mb = mono_array_get (abuilder->modules, MonoReflectionModuleBuilder*, i);
			type = mono_reflection_get_type_internal (rootimage, &mb->dynamic_image->image, info, ignorecase);
			if (type)
				break;
		}
	}

	if (!type && abuilder->loaded_modules) {
		for (i = 0; i < mono_array_length (abuilder->loaded_modules); ++i) {
			MonoReflectionModule *mod = mono_array_get (abuilder->loaded_modules, MonoReflectionModule*, i);
			type = mono_reflection_get_type_internal (rootimage, mod->image, info, ignorecase);
			if (type)
				break;
		}
	}

	return type;
}

/*
 * mono_reflection_get_type_with_rootimage:
 *
 * Like mono_reflection_get_type (), but keeps track of the image the lookup
 * started from. Falls back to the AppDomain TypeResolve event handler when
 * direct lookup fails; TYPE_RESOLVE guards against invoking it twice.
 */
MonoType*
mono_reflection_get_type_with_rootimage (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve)
{
	MonoType *type;
	MonoReflectionAssembly *assembly;
	GString *fullName;
	GList *mod;

	if (image && image->dynamic)
		type = mono_reflection_get_type_internal_dynamic (rootimage, image->assembly, info, ignorecase);
	else
		type = mono_reflection_get_type_internal (rootimage, image, info, ignorecase);
	if (type)
		return type;
	if (!mono_domain_has_type_resolve (mono_domain_get ()))
		return NULL;

	if (type_resolve) {
		if (*type_resolve)
			return NULL;
		else
			*type_resolve = TRUE;
	}

	/* Reconstruct the type name */
	fullName = g_string_new ("");
	if (info->name_space && (info->name_space [0] != '\0'))
		g_string_printf (fullName, "%s.%s", info->name_space, info->name);
	else
		g_string_printf (fullName, "%s", info->name);
	for (mod = info->nested; mod; mod = mod->next)
		g_string_append_printf (fullName, "+%s", (char*)mod->data);

	assembly = mono_domain_try_type_resolve ( mono_domain_get (), fullName->str, NULL);
	if (assembly) {
		if (assembly->assembly->dynamic)
			type = mono_reflection_get_type_internal_dynamic (rootimage, assembly->assembly, info, ignorecase);
		else
			type = mono_reflection_get_type_internal (rootimage, assembly->assembly->image,
													  info, ignorecase);
	}
	g_string_free (fullName, TRUE);
	return type;
}

/*
 * mono_reflection_free_type_info:
 *
 * Free the auxiliary lists/arrays owned by INFO (recursively for generic
 * arguments). The name strings themselves point into the caller's buffer
 * and are not freed.
 */
void
mono_reflection_free_type_info (MonoTypeNameParse *info)
{
	g_list_free (info->modifiers);
	g_list_free (info->nested);

	if (info->type_arguments) {
		int i;

		for (i = 0; i < info->type_arguments->len; i++) {
			MonoTypeNameParse *subinfo = g_ptr_array_index (info->type_arguments, i);

			mono_reflection_free_type_info (subinfo);
			/*We free the subinfo since it is allocated by _mono_reflection_parse_type*/
			g_free (subinfo);
		}

		g_ptr_array_free (info->type_arguments, TRUE);
	}
}

/*
 * mono_reflection_type_from_name:
 * @name: type name.
 * @image: a metadata context (can be NULL).
 *
 * Retrieves a MonoType from its @name. If the name is not fully qualified,
 * it defaults to get the type from @image or, if @image is NULL or loading
 * from it fails, uses corlib.
 * 
 */
MonoType*
mono_reflection_type_from_name (char *name, MonoImage *image)
{
	MonoType *type = NULL;
	MonoTypeNameParse info;
	char *tmp;

	/* Make a copy since parse_type modifies its argument */
	tmp = g_strdup (name);

	/*g_print ("requested type %s\n", str);*/
	if (mono_reflection_parse_type (tmp, &info)) {
		type = _mono_reflection_get_type_from_info (&info, image, FALSE);
	}

	g_free (tmp);
	mono_reflection_free_type_info (&info);
	return type;
}

/*
 * mono_reflection_get_token:
 *
 * Return the metadata token of OBJ which should be an object
 * representing a metadata element. Dispatches on the managed wrapper's
 * class name; raises NotImplementedException for unsupported wrappers.
 */
guint32
mono_reflection_get_token (MonoObject *obj)
{
	MonoClass *klass;
	guint32 token = 0;

	klass = obj->vtable->klass;

	if (strcmp (klass->name, "MethodBuilder") == 0) {
		MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;

		token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
	} else if (strcmp (klass->name, "ConstructorBuilder") == 0) {
		MonoReflectionCtorBuilder *mb = (MonoReflectionCtorBuilder *)obj;

		token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
	} else if (strcmp (klass->name, "FieldBuilder") == 0) {
		MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)obj;

		token = fb->table_idx | MONO_TOKEN_FIELD_DEF;
	} else if (strcmp (klass->name, "TypeBuilder") == 0) {
		MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)obj;
		token = tb->table_idx | MONO_TOKEN_TYPE_DEF;
	} else if (strcmp (klass->name, "MonoType") == 0) {
		MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
		MonoClass *mc = mono_class_from_mono_type (type);
		if (!mono_class_init (mc))
			mono_raise_exception (mono_class_get_exception_for_failure (mc));

		token = mc->type_token;
	} else if (strcmp (klass->name, "MonoCMethod") == 0 ||
			strcmp (klass->name, "MonoMethod") == 0 ||
			strcmp (klass->name, "MonoGenericMethod") == 0 ||
			strcmp (klass->name, "MonoGenericCMethod") == 0) {
		MonoReflectionMethod *m = (MonoReflectionMethod *)obj;
		if (m->method->is_inflated) {
			MonoMethodInflated *inflated = (MonoMethodInflated *) m->method;
			return inflated->declaring->token;
		} else {
			token = m->method->token;
		}
	} else if (strcmp (klass->name, "MonoField") == 0) {
		MonoReflectionField *f = (MonoReflectionField*)obj;

		if (is_field_on_inst (f->field)) {
			/* field on a generic instance: delegate to the declaring
			 * field's wrapper object */
			MonoDynamicGenericClass *dgclass = (MonoDynamicGenericClass*)f->field->parent->generic_class;
			int field_index = f->field - dgclass->fields;
			MonoObject *obj;

			g_assert (field_index >= 0 && field_index < dgclass->count_fields);
			obj = dgclass->field_objects [field_index];
			return mono_reflection_get_token (obj);
		}
		token = mono_class_get_field_token (f->field);
	} else if (strcmp (klass->name, "MonoProperty") == 0) {
		MonoReflectionProperty *p = (MonoReflectionProperty*)obj;

		token = mono_class_get_property_token (p->property);
	} else if (strcmp (klass->name, "MonoEvent") == 0) {
		MonoReflectionMonoEvent *p = (MonoReflectionMonoEvent*)obj;

		token = mono_class_get_event_token (p->event);
	} else if (strcmp (klass->name, "ParameterInfo") == 0) {
		MonoReflectionParameter *p = (MonoReflectionParameter*)obj;
		MonoClass *member_class = mono_object_class (p->MemberImpl);
		g_assert (mono_class_is_reflection_method_or_constructor (member_class));

		token = mono_method_get_param_token (((MonoReflectionMethod*)p->MemberImpl)->method, p->PositionImpl);
	} else if (strcmp (klass->name, "Module") == 0 || strcmp (klass->name, "MonoModule") == 0) {
		MonoReflectionModule *m = (MonoReflectionModule*)obj;

		token = m->token;
	} else if (strcmp (klass->name, "Assembly") == 0 || strcmp (klass->name, "MonoAssembly") == 0) {
		token = mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1);
	} else {
		gchar *msg = g_strdup_printf ("MetadataToken is not supported for type '%s.%s'", klass->name_space, klass->name);
		MonoException *ex = mono_get_exception_not_implemented (msg);
		g_free (msg);
		mono_raise_exception (ex);
	}

	return token;
}

/*
 * load_cattr_value:
 *
 * Decode one fixed or named argument value from a custom-attribute blob at P
 * (ECMA-335 II.23.3). Returns a g_malloc'ed primitive slot, a managed object
 * (string/Type/array/boxed value), or NULL; *END is advanced past the value.
 * NOTE(review): callers are expected to have verified the blob first
 * (mono_verifier_verify_cattr_content); this routine itself does not bounds-check P.
 */
static void*
load_cattr_value (MonoImage *image, MonoType *t, const char *p, const char **end)
{
	int slen, type = t->type;
	MonoClass *tklass = t->data.klass;

handle_enum:
	switch (type) {
	case MONO_TYPE_U1:
	case MONO_TYPE_I1:
	case MONO_TYPE_BOOLEAN: {
		MonoBoolean *bval = g_malloc (sizeof (MonoBoolean));
		*bval = *p;
		*end = p + 1;
		return bval;
	}
	case MONO_TYPE_CHAR:
	case MONO_TYPE_U2:
	case MONO_TYPE_I2: {
		guint16 *val = g_malloc (sizeof (guint16));
		*val = read16 (p);
		*end = p + 2;
		return val;
	}
#if SIZEOF_VOID_P == 4
	case MONO_TYPE_U:
	case MONO_TYPE_I:
#endif
	case MONO_TYPE_R4:
	case MONO_TYPE_U4:
	case MONO_TYPE_I4: {
		guint32 *val = g_malloc (sizeof (guint32));
		*val = read32 (p);
		*end = p + 4;
		return val;
	}
#if SIZEOF_VOID_P == 8
	case MONO_TYPE_U: /* error out instead? this should probably not happen */
	case MONO_TYPE_I:
#endif
	case MONO_TYPE_U8:
	case MONO_TYPE_I8: {
		guint64 *val = g_malloc (sizeof (guint64));
		*val = read64 (p);
		*end = p + 8;
		return val;
	}
	case MONO_TYPE_R8: {
		double *val = g_malloc (sizeof (double));
		readr8 (p, val);
		*end = p + 8;
		return val;
	}
	case MONO_TYPE_VALUETYPE:
		if (t->data.klass->enumtype) {
			/* enums are stored as their underlying integer type */
			type = mono_class_enum_basetype (t->data.klass)->type;
			goto handle_enum;
		} else {
			MonoClass *k =  t->data.klass;

			/* System.DateTime is the one non-enum valuetype with a
			 * defined encoding: a 64-bit tick count */
			if (mono_is_corlib_image (k->image) && strcmp (k->name_space, "System") == 0 && strcmp (k->name, "DateTime") == 0){
				guint64 *val = g_malloc (sizeof (guint64));
				*val = read64 (p);
				*end = p + 8;
				return val;
			}
		}
		g_error ("generic valutype %s not handled in custom attr value decoding", t->data.klass->name);
		break;

	case MONO_TYPE_STRING:
		if (*p == (char)0xFF) {
			/* 0xFF encodes a null string */
			*end = p + 1;
			return NULL;
		}
		slen = mono_metadata_decode_value (p, &p);
		*end = p + slen;
		return mono_string_new_len (mono_domain_get (), p, slen);
	case MONO_TYPE_CLASS: {
		char *n;
		MonoType *t;
		if (*p == (char)0xFF) {
			*end = p + 1;
			return NULL;
		}
handle_type:
		/* System.Type argument: serialized as the type's display name */
		slen = mono_metadata_decode_value (p, &p);
		n = g_memdup (p, slen + 1);
		n [slen] = 0;
		t = mono_reflection_type_from_name (n, image);
		if (!t)
			g_warning ("Cannot load type '%s'", n);
		g_free (n);
		*end = p + slen;
		if (t)
			return mono_type_get_object (mono_domain_get (), t);
		else
			return NULL;
	}
	case MONO_TYPE_OBJECT: {
		/* boxed value: a one-byte type tag precedes the actual value */
		char subt = *p++;
		MonoObject *obj;
		MonoClass *subc = NULL;
		void *val;

		if (subt == 0x50) {
			/* tag 0x50: System.Type */
			goto handle_type;
		} else if (subt == 0x0E) {
			type = MONO_TYPE_STRING;
			goto handle_enum;
		} else if (subt == 0x1D) {
			/* tag 0x1D: SZARRAY with an element-type byte */
			MonoType simple_type = {{0}};
			int etype = *p;
			p ++;

			if (etype == 0x51)
				/* See Partition II, Appendix B3 */
				etype = MONO_TYPE_OBJECT;
			type = MONO_TYPE_SZARRAY;
			simple_type.type = etype;
			tklass = mono_class_from_mono_type (&simple_type);
			goto handle_enum;
		} else if (subt == 0x55) {
			/* tag 0x55: enum, followed by its type name */
			char *n;
			MonoType *t;
			slen = mono_metadata_decode_value (p, &p);
			n = g_memdup (p, slen + 1);
			n [slen] = 0;
			t = mono_reflection_type_from_name (n, image);
			if (!t)
				g_error ("Cannot load type '%s'", n);
			g_free (n);
			p += slen;
			subc = mono_class_from_mono_type (t);
		} else if (subt >= MONO_TYPE_BOOLEAN && subt <= MONO_TYPE_R8) {
			MonoType simple_type = {{0}};

			simple_type.type = subt;
			subc = mono_class_from_mono_type (&simple_type);
		} else {
			g_error ("Unknown type 0x%02x for object type encoding in custom attr", subt);
		}
		val = load_cattr_value (image, &subc->byval_arg, p, end);
		obj = mono_object_new (mono_domain_get (), subc);
		g_assert (!subc->has_references);
		memcpy ((char*)obj + sizeof (MonoObject), val, mono_class_value_size (subc, NULL));
		g_free (val);
		return obj;
	}
	case MONO_TYPE_SZARRAY: {
		MonoArray *arr;
		guint32 i, alen, basetype;
		alen = read32 (p);
		p += 4;
		if (alen == 0xffffffff) {
			/* 0xffffffff encodes a null array */
			*end = p;
			return NULL;
		}
		arr = mono_array_new (mono_domain_get(), tklass, alen);
		basetype = tklass->byval_arg.type;
		if (basetype == MONO_TYPE_VALUETYPE && tklass->enumtype)
			basetype = mono_class_enum_basetype (tklass)->type;
		switch (basetype)
		{
			case MONO_TYPE_U1:
			case MONO_TYPE_I1:
			case MONO_TYPE_BOOLEAN:
				for (i = 0; i < alen; i++) {
					MonoBoolean val = *p++;
					mono_array_set (arr, MonoBoolean, i, val);
				}
				break;
			case MONO_TYPE_CHAR:
			case MONO_TYPE_U2:
			case MONO_TYPE_I2:
				for (i = 0; i < alen; i++) {
					guint16 val = read16 (p);
					mono_array_set (arr, guint16, i, val);
					p += 2;
				}
				break;
			case MONO_TYPE_R4:
			case MONO_TYPE_U4:
			case MONO_TYPE_I4:
				for (i = 0; i < alen; i++) {
					guint32 val = read32 (p);
					mono_array_set (arr, guint32, i, val);
					p += 4;
				}
				break;
			case MONO_TYPE_R8:
				for (i = 0; i < alen; i++) {
					double val;
					readr8 (p, &val);
					mono_array_set (arr, double, i, val);
					p += 8;
				}
				break;
			case MONO_TYPE_U8:
			case MONO_TYPE_I8:
				for (i = 0; i < alen; i++) {
					guint64 val = read64 (p);
					mono_array_set (arr, guint64, i, val);
					p += 8;
				}
				break;
			case MONO_TYPE_CLASS:
			case MONO_TYPE_OBJECT:
			case MONO_TYPE_STRING:
				for (i = 0; i < alen; i++) {
					MonoObject *item = load_cattr_value (image, &tklass->byval_arg, p, &p);
					mono_array_setref (arr, i, item);
				}
				break;
			default:
				g_error ("Type 0x%02x not handled in custom attr array decoding", basetype);
		}
		*end=p;
		return arr;
	}
	default:
		g_error ("Type 0x%02x not handled in custom attr value decoding", type);
	}
	return NULL;
}

/*
 * create_cattr_typed_arg:
 *
 * Box VAL into a managed System.Reflection.CustomAttributeTypedArgument.
 */
static MonoObject*
create_cattr_typed_arg (MonoType *t, MonoObject *val)
{
	static MonoClass *klass;
	static MonoMethod *ctor;
	MonoObject *retval;
	void *params [2], *unboxed;

	if (!klass)
		klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "CustomAttributeTypedArgument");
	if (!ctor)
		ctor = mono_class_get_method_from_name (klass, ".ctor", 2);

	params [0] = mono_type_get_object (mono_domain_get (), t);
	params [1] = val;
	retval = mono_object_new (mono_domain_get (), klass);
	unboxed = mono_object_unbox (retval);
	/* valuetype ctor: invoke on the unboxed payload */
	mono_runtime_invoke (ctor, unboxed, params, NULL);

	return retval;
}

/*
 * create_cattr_named_arg:
 *
 * Wrap MINFO + TYPEDARG into a System.Reflection.CustomAttributeNamedArgument.
 */
static MonoObject*
create_cattr_named_arg (void *minfo, MonoObject *typedarg)
{
	static MonoClass *klass;
	static MonoMethod *ctor;
	MonoObject *retval;
	void *unboxed, *params [2];

	if (!klass)
		klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "CustomAttributeNamedArgument");
	if (!ctor)
		ctor = mono_class_get_method_from_name (klass, ".ctor", 2);

	params [0] = minfo;
	params [1] = typedarg;
	retval = mono_object_new (mono_domain_get (), klass);
	unboxed = mono_object_unbox (retval);
	mono_runtime_invoke (ctor, unboxed, params, NULL);

	return retval;
}

/*
 * type_is_reference:
 *
 * TRUE when load_cattr_value () returns a GC-managed object for TYPE (which
 * must not be g_free'd), FALSE when it returns a g_malloc'ed primitive slot.
 */
static gboolean
type_is_reference (MonoType *type)
{
	switch (type->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_U:
	case MONO_TYPE_I:
	case MONO_TYPE_U1:
	case MONO_TYPE_I1:
	case MONO_TYPE_U2:
	case MONO_TYPE_I2:
	case MONO_TYPE_U4:
	case MONO_TYPE_I4:
	case MONO_TYPE_U8:
	case MONO_TYPE_I8:
	case MONO_TYPE_R8:
	case MONO_TYPE_R4:
	case MONO_TYPE_VALUETYPE:
		return FALSE;
	default:
		return TRUE;
	}
}

/* Free the non-reference (g_malloc'ed) entries of a decoded argument array */
static void
free_param_data (MonoMethodSignature *sig, void **params) {
	int i;
	for (i = 0; i < sig->param_count; ++i) {
		if (!type_is_reference (sig->params [i]))
			g_free (params [i]);
	}
}

/*
 * Find the field index in the metadata FieldDef table.
 */
static guint32
find_field_index (MonoClass *klass, MonoClassField *field) {
	int i;

	for (i = 0; i < klass->field.count; ++i) {
		if (field == &klass->fields [i])
			return klass->field.first + 1 + i;
	}
	return 0;
}

/*
 * Find the property index in the metadata Property table.
 */
static guint32
find_property_index (MonoClass *klass, MonoProperty *property) {
	int i;

	for (i = 0; i < klass->ext->property.count; ++i) {
		if (property == &klass->ext->properties [i])
			return klass->ext->property.first + 1 + i;
	}
	return 0;
}

/*
 * Find the event index in the metadata Event table.
 */
static guint32
find_event_index (MonoClass *klass, MonoEvent *event) {
	int i;

	for (i = 0; i < klass->ext->event.count; ++i) {
		if (event == &klass->ext->events [i])
			return klass->ext->event.first + 1 + i;
	}
	return 0;
}

/*
 * create_custom_attr:
 *
 * Instantiate the custom attribute described by the blob DATA/LEN: decode the
 * constructor arguments, invoke METHOD (the attribute ctor), then apply the
 * named field (0x53) and property (0x54) arguments. Returns NULL if the blob
 * fails verification or has a bad prolog.
 */
static MonoObject*
create_custom_attr (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len)
{
	const char *p = (const char*)data;
	const char *named;
	guint32 i, j, num_named;
	MonoObject *attr;
	void *params_buf [32];
	void **params;
	MonoMethodSignature *sig;

	mono_class_init (method->klass);

	/* reject malformed blobs before decoding anything */
	if (!mono_verifier_verify_cattr_content (image, method, data, len, NULL))
		return NULL;

	if (len == 0) {
		attr = mono_object_new (mono_domain_get (), method->klass);
		mono_runtime_invoke (method, attr, NULL, NULL);

		return attr;
	}

	if (len < 2 || read16 (p) != 0x0001) /* Prolog */
		return NULL;

	/*g_print ("got attr %s\n", method->klass->name);*/

	sig = mono_method_signature (method);
	if (sig->param_count < 32)
		params = params_buf;
	else
		/* Allocate using GC so it gets GC tracking */
		params = mono_gc_alloc_fixed (sig->param_count * sizeof (void*), NULL);

	/* skip prolog */
	p += 2;
	for (i = 0; i < mono_method_signature (method)->param_count; ++i) {
		params [i] = load_cattr_value (image, mono_method_signature (method)->params [i], p, &p);
	}

	named = p;
	attr = mono_object_new (mono_domain_get (), method->klass);
	mono_runtime_invoke (method, attr, params, NULL);
	free_param_data (method->signature, params);
	num_named = read16 (named);
	named += 2;
	for (j = 0; j < num_named; j++) {
		gint name_len;
		char *name, named_type, data_type;
		named_type = *named++;
		data_type = *named++; /* type of data */
		if (data_type == MONO_TYPE_SZARRAY)
			data_type = *named++;
		if (data_type == MONO_TYPE_ENUM) {
			gint type_len;
			char *type_name;
			type_len = mono_metadata_decode_blob_size (named, &named);
			type_name = g_malloc (type_len + 1);
			memcpy (type_name, named, type_len);
			type_name [type_len] = 0;
			named += type_len;
			/* FIXME: lookup the type and check type consistency */
			g_free (type_name);
		}
		name_len = mono_metadata_decode_blob_size (named, &named);
		name = g_malloc (name_len + 1);
		memcpy (name, named, name_len);
		name [name_len] = 0;
		named += name_len;
		if (named_type == 0x53) {
			/* 0x53: named field */
			MonoClassField *field = mono_class_get_field_from_name (mono_object_class (attr), name);
			void *val = load_cattr_value (image, field->type, named, &named);
			mono_field_set_value (attr, field, val);
			if (!type_is_reference (field->type))
				g_free (val);
		} else if (named_type == 0x54) {
			/* 0x54: named property */
			MonoProperty *prop;
			void *pparams [1];
			MonoType *prop_type;

			prop = mono_class_get_property_from_name (mono_object_class (attr), name);
			/* can we have more that 1 arg in a custom attr named property? */
			prop_type = prop->get? mono_method_signature (prop->get)->ret :
			     mono_method_signature (prop->set)->params [mono_method_signature (prop->set)->param_count - 1];
			pparams [0] = load_cattr_value (image, prop_type, named, &named);
			mono_property_set_value (prop, attr, pparams, NULL);
			if (!type_is_reference (prop_type))
				g_free (pparams [0]);
		}
		g_free (name);
	}

	if (params != params_buf)
		mono_gc_free_fixed (params);

	return attr;
}

/*
 * mono_reflection_create_custom_attr_data_args:
 *
 * Create an array of typed and named arguments from the cattr blob given by DATA.
* TYPED_ARGS and NAMED_ARGS will contain the objects representing the arguments, * NAMED_ARG_INFO will contain information about the named arguments. */ void mono_reflection_create_custom_attr_data_args (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len, MonoArray **typed_args, MonoArray **named_args, CattrNamedArg **named_arg_info) { MonoArray *typedargs, *namedargs; MonoClass *attrklass; MonoDomain *domain; const char *p = (const char*)data; const char *named; guint32 i, j, num_named; CattrNamedArg *arginfo = NULL; if (!mono_verifier_verify_cattr_content (image, method, data, len, NULL)) return; mono_class_init (method->klass); *typed_args = NULL; *named_args = NULL; *named_arg_info = NULL; domain = mono_domain_get (); if (len < 2 || read16 (p) != 0x0001) /* Prolog */ return; typedargs = mono_array_new (domain, mono_get_object_class (), mono_method_signature (method)->param_count); /* skip prolog */ p += 2; for (i = 0; i < mono_method_signature (method)->param_count; ++i) { MonoObject *obj; void *val; val = load_cattr_value (image, mono_method_signature (method)->params [i], p, &p); obj = type_is_reference (mono_method_signature (method)->params [i]) ? 
val : mono_value_box (domain, mono_class_from_mono_type (mono_method_signature (method)->params [i]), val); mono_array_setref (typedargs, i, obj); if (!type_is_reference (mono_method_signature (method)->params [i])) g_free (val); } named = p; num_named = read16 (named); namedargs = mono_array_new (domain, mono_get_object_class (), num_named); named += 2; attrklass = method->klass; arginfo = g_new0 (CattrNamedArg, num_named); *named_arg_info = arginfo; for (j = 0; j < num_named; j++) { gint name_len; char *name, named_type, data_type; named_type = *named++; data_type = *named++; /* type of data */ if (data_type == MONO_TYPE_SZARRAY) data_type = *named++; if (data_type == MONO_TYPE_ENUM) { gint type_len; char *type_name; type_len = mono_metadata_decode_blob_size (named, &named); type_name = g_malloc (type_len + 1); memcpy (type_name, named, type_len); type_name [type_len] = 0; named += type_len; /* FIXME: lookup the type and check type consistency */ g_free (type_name); } name_len = mono_metadata_decode_blob_size (named, &named); name = g_malloc (name_len + 1); memcpy (name, named, name_len); name [name_len] = 0; named += name_len; if (named_type == 0x53) { MonoObject *obj; MonoClassField *field = mono_class_get_field_from_name (attrklass, name); void *val; arginfo [j].type = field->type; arginfo [j].field = field; val = load_cattr_value (image, field->type, named, &named); obj = type_is_reference (field->type) ? val : mono_value_box (domain, mono_class_from_mono_type (field->type), val); mono_array_setref (namedargs, j, obj); if (!type_is_reference (field->type)) g_free (val); } else if (named_type == 0x54) { MonoObject *obj; MonoType *prop_type; MonoProperty *prop = mono_class_get_property_from_name (attrklass, name); void *val; prop_type = prop->get? 
mono_method_signature (prop->get)->ret : mono_method_signature (prop->set)->params [mono_method_signature (prop->set)->param_count - 1]; arginfo [j].type = prop_type; arginfo [j].prop = prop; val = load_cattr_value (image, prop_type, named, &named); obj = type_is_reference (prop_type) ? val : mono_value_box (domain, mono_class_from_mono_type (prop_type), val); mono_array_setref (namedargs, j, obj); if (!type_is_reference (prop_type)) g_free (val); } g_free (name); } *typed_args = typedargs; *named_args = namedargs; } void mono_reflection_resolve_custom_attribute_data (MonoReflectionMethod *ref_method, MonoReflectionAssembly *assembly, gpointer data, guint32 len, MonoArray **ctor_args, MonoArray **named_args) { MonoDomain *domain; MonoArray *typedargs, *namedargs; MonoImage *image; MonoMethod *method; CattrNamedArg *arginfo; int i; *ctor_args = NULL; *named_args = NULL; if (len == 0) return; image = assembly->assembly->image; method = ref_method->method; domain = mono_object_domain (ref_method); if (!mono_class_init (method->klass)) mono_raise_exception (mono_class_get_exception_for_failure (method->klass)); mono_reflection_create_custom_attr_data_args (image, method, data, len, &typedargs, &namedargs, &arginfo); if (mono_loader_get_last_error ()) mono_raise_exception (mono_loader_error_prepare_exception (mono_loader_get_last_error ())); if (!typedargs || !namedargs) return; for (i = 0; i < mono_method_signature (method)->param_count; ++i) { MonoObject *obj = mono_array_get (typedargs, MonoObject*, i); MonoObject *typedarg; typedarg = create_cattr_typed_arg (mono_method_signature (method)->params [i], obj); mono_array_setref (typedargs, i, typedarg); } for (i = 0; i < mono_array_length (namedargs); ++i) { MonoObject *obj = mono_array_get (namedargs, MonoObject*, i); MonoObject *typedarg, *namedarg, *minfo; if (arginfo [i].prop) minfo = (MonoObject*)mono_property_get_object (domain, NULL, arginfo [i].prop); else minfo = (MonoObject*)mono_field_get_object (domain, 
NULL, arginfo [i].field);
		/* NOTE(review): the lines above finish
		 * mono_reflection_resolve_custom_attribute_data (started on the
		 * previous chunk line): wrap each named arg and store it back. */
		typedarg = create_cattr_typed_arg (arginfo [i].type, obj);
		namedarg = create_cattr_named_arg (minfo, typedarg);

		mono_array_setref (namedargs, i, namedarg);
	}

	*ctor_args = typedargs;
	*named_args = namedargs;
}

/*
 * Build a managed System.Reflection.CustomAttributeData instance describing
 * CATTR without instantiating the attribute itself (used by the
 * reflection-only load path).  The 4-arg ctor lookup is cached in a static.
 */
static MonoObject*
create_custom_attr_data (MonoImage *image, MonoCustomAttrEntry *cattr)
{
	static MonoMethod *ctor;
	MonoDomain *domain;
	MonoObject *attr;
	void *params [4];

	g_assert (image->assembly);

	if (!ctor)
		ctor = mono_class_get_method_from_name (mono_defaults.customattribute_data_class, ".ctor", 4);

	domain = mono_domain_get ();
	attr = mono_object_new (domain, mono_defaults.customattribute_data_class);
	params [0] = mono_method_get_object (domain, cattr->ctor, NULL);
	params [1] = mono_assembly_get_object (domain, image->assembly);
	/* raw blob pointer and size are passed by reference to the managed ctor */
	params [2] = (gpointer)&cattr->data;
	params [3] = &cattr->data_size;
	mono_runtime_invoke (ctor, attr, params, NULL);
	return attr;
}

/*
 * Instantiate every attribute described by CINFO into a managed
 * Attribute[] array.
 */
MonoArray*
mono_custom_attrs_construct (MonoCustomAttrInfo *cinfo)
{
	MonoArray *result;
	MonoObject *attr;
	int i;

	result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, cinfo->num_attrs);
	for (i = 0; i < cinfo->num_attrs; ++i) {
		if (!cinfo->attrs [i].ctor)
			/* The cattr type is not finished yet */
			/* We should include the type name but cinfo doesn't contain it */
			mono_raise_exception (mono_get_exception_type_load (NULL, NULL));
		attr = create_custom_attr (cinfo->image, cinfo->attrs [i].ctor, cinfo->attrs [i].data, cinfo->attrs [i].data_size);
		mono_array_setref (result, i, attr);
	}
	return result;
}

/*
 * Like mono_custom_attrs_construct but only instantiates attributes whose
 * type is assignable to ATTR_KLASS.  Two passes: first count matches to size
 * the result array, then construct.
 */
static MonoArray*
mono_custom_attrs_construct_by_type (MonoCustomAttrInfo *cinfo, MonoClass *attr_klass)
{
	MonoArray *result;
	MonoObject *attr;
	int i, n;

	n = 0;
	for (i = 0; i < cinfo->num_attrs; ++i) {
		if (mono_class_is_assignable_from (attr_klass, cinfo->attrs [i].ctor->klass))
			n ++;
	}

	result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, n);
	n = 0;
	for (i = 0; i < cinfo->num_attrs; ++i) {
		if (mono_class_is_assignable_from (attr_klass, cinfo->attrs [i].ctor->klass)) {
			attr = create_custom_attr (cinfo->image, cinfo->attrs [i].ctor, cinfo->attrs [i].data, cinfo->attrs [i].data_size);
			mono_array_setref (result, n, attr);
			n ++;
		}
	}
	return result;
}

/*
 * Build a CustomAttributeData[] for CINFO (reflection-only path; no
 * attribute instantiation).
 */
static MonoArray*
mono_custom_attrs_data_construct (MonoCustomAttrInfo *cinfo)
{
	MonoArray *result;
	MonoObject *attr;
	int i;

	result = mono_array_new (mono_domain_get (), mono_defaults.customattribute_data_class, cinfo->num_attrs);
	for (i = 0; i < cinfo->num_attrs; ++i) {
		attr = create_custom_attr_data (cinfo->image, &cinfo->attrs [i]);
		mono_array_setref (result, i, attr);
	}
	return result;
}

/**
 * mono_custom_attrs_from_index:
 *
 * Returns: NULL if no attributes are found or if a loading error occurs.
 */
MonoCustomAttrInfo*
mono_custom_attrs_from_index (MonoImage *image, guint32 idx)
{
	guint32 mtoken, i, len;
	guint32 cols [MONO_CUSTOM_ATTR_SIZE];
	MonoTableInfo *ca;
	MonoCustomAttrInfo *ainfo;
	GList *tmp, *list = NULL;
	const char *data;

	ca = &image->tables [MONO_TABLE_CUSTOMATTRIBUTE];

	/* collect the (1-based) CustomAttribute rows whose Parent equals IDX;
	 * rows with the same parent are contiguous in the sorted table */
	i = mono_metadata_custom_attrs_from_index (image, idx);
	if (!i)
		return NULL;
	i --;
	while (i < ca->rows) {
		if (mono_metadata_decode_row_col (ca, i, MONO_CUSTOM_ATTR_PARENT) != idx)
			break;
		list = g_list_prepend (list, GUINT_TO_POINTER (i));
		++i;
	}
	len = g_list_length (list);
	if (!len)
		return NULL;
	/* single allocation: header plus trailing array of entries */
	ainfo = g_malloc0 (MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * len);
	ainfo->num_attrs = len;
	ainfo->image = image;
	for (i = 0, tmp = list; i < len; ++i, tmp = tmp->next) {
		mono_metadata_decode_row (ca, GPOINTER_TO_UINT (tmp->data), cols, MONO_CUSTOM_ATTR_SIZE);
		/* the Type column is a coded token: decode the row index and the
		 * table tag into a full method token */
		mtoken = cols [MONO_CUSTOM_ATTR_TYPE] >> MONO_CUSTOM_ATTR_TYPE_BITS;
		switch (cols [MONO_CUSTOM_ATTR_TYPE] & MONO_CUSTOM_ATTR_TYPE_MASK) {
		case MONO_CUSTOM_ATTR_TYPE_METHODDEF:
			mtoken |= MONO_TOKEN_METHOD_DEF;
			break;
		case MONO_CUSTOM_ATTR_TYPE_MEMBERREF:
			mtoken |= MONO_TOKEN_MEMBER_REF;
			break;
		default:
			g_error ("Unknown table for custom attr type %08x", cols [MONO_CUSTOM_ATTR_TYPE]);
			break;
		}
ainfo->attrs [i].ctor = mono_get_method (image, mtoken, NULL); if (!ainfo->attrs [i].ctor) { g_warning ("Can't find custom attr constructor image: %s mtoken: 0x%08x", image->name, mtoken); g_list_free (list); g_free (ainfo); return NULL; } if (!mono_verifier_verify_cattr_blob (image, cols [MONO_CUSTOM_ATTR_VALUE], NULL)) { /*FIXME raising an exception here doesn't make any sense*/ g_warning ("Invalid custom attribute blob on image %s for index %x", image->name, idx); g_list_free (list); g_free (ainfo); return NULL; } data = mono_metadata_blob_heap (image, cols [MONO_CUSTOM_ATTR_VALUE]); ainfo->attrs [i].data_size = mono_metadata_decode_value (data, &data); ainfo->attrs [i].data = (guchar*)data; } g_list_free (list); return ainfo; } MonoCustomAttrInfo* mono_custom_attrs_from_method (MonoMethod *method) { guint32 idx; /* * An instantiated method has the same cattrs as the generic method definition. * * LAMESPEC: The .NET SRE throws an exception for instantiations of generic method builders * Note that this stanza is not necessary for non-SRE types, but it's a micro-optimization */ if (method->is_inflated) method = ((MonoMethodInflated *) method)->declaring; if (method->dynamic || method->klass->image->dynamic) return lookup_custom_attr (method->klass->image, method); if (!method->token) /* Synthetic methods */ return NULL; idx = mono_method_get_index (method); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_METHODDEF; return mono_custom_attrs_from_index (method->klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_class (MonoClass *klass) { guint32 idx; if (klass->generic_class) klass = klass->generic_class->container_class; if (klass->image->dynamic) return lookup_custom_attr (klass->image, klass); if (klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR) { idx = mono_metadata_token_index (klass->sizes.generic_param_token); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_GENERICPAR; } else { idx = 
mono_metadata_token_index (klass->type_token); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_TYPEDEF; } return mono_custom_attrs_from_index (klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_assembly (MonoAssembly *assembly) { guint32 idx; if (assembly->image->dynamic) return lookup_custom_attr (assembly->image, assembly); idx = 1; /* there is only one assembly */ idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_ASSEMBLY; return mono_custom_attrs_from_index (assembly->image, idx); } static MonoCustomAttrInfo* mono_custom_attrs_from_module (MonoImage *image) { guint32 idx; if (image->dynamic) return lookup_custom_attr (image, image); idx = 1; /* there is only one module */ idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_MODULE; return mono_custom_attrs_from_index (image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_property (MonoClass *klass, MonoProperty *property) { guint32 idx; if (klass->image->dynamic) { property = mono_metadata_get_corresponding_property_from_generic_type_definition (property); return lookup_custom_attr (klass->image, property); } idx = find_property_index (klass, property); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_PROPERTY; return mono_custom_attrs_from_index (klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_event (MonoClass *klass, MonoEvent *event) { guint32 idx; if (klass->image->dynamic) { event = mono_metadata_get_corresponding_event_from_generic_type_definition (event); return lookup_custom_attr (klass->image, event); } idx = find_event_index (klass, event); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_EVENT; return mono_custom_attrs_from_index (klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_field (MonoClass *klass, MonoClassField *field) { guint32 idx; if (klass->image->dynamic) { field = mono_metadata_get_corresponding_field_from_generic_type_definition (field); return lookup_custom_attr (klass->image, field); } idx = find_field_index 
(klass, field); idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_FIELDDEF; return mono_custom_attrs_from_index (klass->image, idx); } MonoCustomAttrInfo* mono_custom_attrs_from_param (MonoMethod *method, guint32 param) { MonoTableInfo *ca; guint32 i, idx, method_index; guint32 param_list, param_last, param_pos, found; MonoImage *image; MonoReflectionMethodAux *aux; /* * An instantiated method has the same cattrs as the generic method definition. * * LAMESPEC: The .NET SRE throws an exception for instantiations of generic method builders * Note that this stanza is not necessary for non-SRE types, but it's a micro-optimization */ if (method->is_inflated) method = ((MonoMethodInflated *) method)->declaring; if (method->klass->image->dynamic) { MonoCustomAttrInfo *res, *ainfo; int size; aux = g_hash_table_lookup (((MonoDynamicImage*)method->klass->image)->method_aux_hash, method); if (!aux || !aux->param_cattr) return NULL; /* Need to copy since it will be freed later */ ainfo = aux->param_cattr [param]; if (!ainfo) return NULL; size = MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * ainfo->num_attrs; res = g_malloc0 (size); memcpy (res, ainfo, size); return res; } image = method->klass->image; method_index = mono_method_get_index (method); if (!method_index) return NULL; ca = &image->tables [MONO_TABLE_METHOD]; param_list = mono_metadata_decode_row_col (ca, method_index - 1, MONO_METHOD_PARAMLIST); if (method_index == ca->rows) { ca = &image->tables [MONO_TABLE_PARAM]; param_last = ca->rows + 1; } else { param_last = mono_metadata_decode_row_col (ca, method_index, MONO_METHOD_PARAMLIST); ca = &image->tables [MONO_TABLE_PARAM]; } found = FALSE; for (i = param_list; i < param_last; ++i) { param_pos = mono_metadata_decode_row_col (ca, i - 1, MONO_PARAM_SEQUENCE); if (param_pos == param) { found = TRUE; break; } } if (!found) return NULL; idx = i; idx <<= MONO_CUSTOM_ATTR_BITS; idx |= MONO_CUSTOM_ATTR_PARAMDEF; return mono_custom_attrs_from_index 
(image, idx); } gboolean mono_custom_attrs_has_attr (MonoCustomAttrInfo *ainfo, MonoClass *attr_klass) { int i; MonoClass *klass; for (i = 0; i < ainfo->num_attrs; ++i) { klass = ainfo->attrs [i].ctor->klass; if (mono_class_has_parent (klass, attr_klass) || (MONO_CLASS_IS_INTERFACE (attr_klass) && mono_class_is_assignable_from (attr_klass, klass))) return TRUE; } return FALSE; } MonoObject* mono_custom_attrs_get_attr (MonoCustomAttrInfo *ainfo, MonoClass *attr_klass) { int i, attr_index; MonoClass *klass; MonoArray *attrs; attr_index = -1; for (i = 0; i < ainfo->num_attrs; ++i) { klass = ainfo->attrs [i].ctor->klass; if (mono_class_has_parent (klass, attr_klass)) { attr_index = i; break; } } if (attr_index == -1) return NULL; attrs = mono_custom_attrs_construct (ainfo); if (attrs) return mono_array_get (attrs, MonoObject*, attr_index); else return NULL; } /* * mono_reflection_get_custom_attrs_info: * @obj: a reflection object handle * * Return the custom attribute info for attributes defined for the * reflection handle @obj. The objects. * * FIXME this function leaks like a sieve for SRE objects. 
*/ MonoCustomAttrInfo* mono_reflection_get_custom_attrs_info (MonoObject *obj) { MonoClass *klass; MonoCustomAttrInfo *cinfo = NULL; klass = obj->vtable->klass; if (klass == mono_defaults.monotype_class) { MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj); klass = mono_class_from_mono_type (type); /*We cannot mono_class_init the class from which we'll load the custom attributes since this must work with broken types.*/ cinfo = mono_custom_attrs_from_class (klass); } else if (strcmp ("Assembly", klass->name) == 0 || strcmp ("MonoAssembly", klass->name) == 0) { MonoReflectionAssembly *rassembly = (MonoReflectionAssembly*)obj; cinfo = mono_custom_attrs_from_assembly (rassembly->assembly); } else if (strcmp ("Module", klass->name) == 0 || strcmp ("MonoModule", klass->name) == 0) { MonoReflectionModule *module = (MonoReflectionModule*)obj; cinfo = mono_custom_attrs_from_module (module->image); } else if (strcmp ("MonoProperty", klass->name) == 0) { MonoReflectionProperty *rprop = (MonoReflectionProperty*)obj; cinfo = mono_custom_attrs_from_property (rprop->property->parent, rprop->property); } else if (strcmp ("MonoEvent", klass->name) == 0) { MonoReflectionMonoEvent *revent = (MonoReflectionMonoEvent*)obj; cinfo = mono_custom_attrs_from_event (revent->event->parent, revent->event); } else if (strcmp ("MonoField", klass->name) == 0) { MonoReflectionField *rfield = (MonoReflectionField*)obj; cinfo = mono_custom_attrs_from_field (rfield->field->parent, rfield->field); } else if ((strcmp ("MonoMethod", klass->name) == 0) || (strcmp ("MonoCMethod", klass->name) == 0)) { MonoReflectionMethod *rmethod = (MonoReflectionMethod*)obj; cinfo = mono_custom_attrs_from_method (rmethod->method); } else if ((strcmp ("MonoGenericMethod", klass->name) == 0) || (strcmp ("MonoGenericCMethod", klass->name) == 0)) { MonoReflectionMethod *rmethod = (MonoReflectionMethod*)obj; cinfo = mono_custom_attrs_from_method (rmethod->method); } else if (strcmp 
("ParameterInfo", klass->name) == 0) { MonoReflectionParameter *param = (MonoReflectionParameter*)obj; MonoClass *member_class = mono_object_class (param->MemberImpl); if (mono_class_is_reflection_method_or_constructor (member_class)) { MonoReflectionMethod *rmethod = (MonoReflectionMethod*)param->MemberImpl; cinfo = mono_custom_attrs_from_param (rmethod->method, param->PositionImpl + 1); } else if (is_sr_mono_property (member_class)) { MonoReflectionProperty *prop = (MonoReflectionProperty *)param->MemberImpl; MonoMethod *method; if (!(method = prop->property->get)) method = prop->property->set; g_assert (method); cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1); } #ifndef DISABLE_REFLECTION_EMIT else if (is_sre_method_on_tb_inst (member_class)) {/*XXX This is a workaround for Compiler Context*/ MonoMethod *method = mono_reflection_method_on_tb_inst_get_handle ((MonoReflectionMethodOnTypeBuilderInst*)param->MemberImpl); cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1); } else if (is_sre_ctor_on_tb_inst (member_class)) { /*XX This is a workaround for Compiler Context*/ MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)param->MemberImpl; MonoMethod *method = NULL; if (is_sre_ctor_builder (mono_object_class (c->cb))) method = ((MonoReflectionCtorBuilder *)c->cb)->mhandle; else if (is_sr_mono_cmethod (mono_object_class (c->cb))) method = ((MonoReflectionMethod *)c->cb)->method; else g_error ("mono_reflection_get_custom_attrs_info:: can't handle a CTBI with base_method of type %s", mono_type_get_full_name (member_class)); cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1); } #endif else { char *type_name = mono_type_get_full_name (member_class); char *msg = g_strdup_printf ("Custom attributes on a ParamInfo with member %s are not supported", type_name); MonoException *ex = mono_get_exception_not_supported (msg); g_free (type_name); g_free (msg); mono_raise_exception (ex); } } 
else if (strcmp ("AssemblyBuilder", klass->name) == 0) { MonoReflectionAssemblyBuilder *assemblyb = (MonoReflectionAssemblyBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, assemblyb->assembly.assembly->image, assemblyb->cattrs); } else if (strcmp ("TypeBuilder", klass->name) == 0) { MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, &tb->module->dynamic_image->image, tb->cattrs); } else if (strcmp ("ModuleBuilder", klass->name) == 0) { MonoReflectionModuleBuilder *mb = (MonoReflectionModuleBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, &mb->dynamic_image->image, mb->cattrs); } else if (strcmp ("ConstructorBuilder", klass->name) == 0) { MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, cb->mhandle->klass->image, cb->cattrs); } else if (strcmp ("MethodBuilder", klass->name) == 0) { MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, mb->mhandle->klass->image, mb->cattrs); } else if (strcmp ("FieldBuilder", klass->name) == 0) { MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder*)obj; cinfo = mono_custom_attrs_from_builders (NULL, &((MonoReflectionTypeBuilder*)fb->typeb)->module->dynamic_image->image, fb->cattrs); } else if (strcmp ("MonoGenericClass", klass->name) == 0) { MonoReflectionGenericClass *gclass = (MonoReflectionGenericClass*)obj; cinfo = mono_reflection_get_custom_attrs_info ((MonoObject*)gclass->generic_type); } else { /* handle other types here... */ g_error ("get custom attrs not yet supported for %s", klass->name); } return cinfo; } /* * mono_reflection_get_custom_attrs_by_type: * @obj: a reflection object handle * * Return an array with all the custom attributes defined of the * reflection handle @obj. If @attr_klass is non-NULL, only custom attributes * of that type are returned. The objects are fully build. 
Return NULL if a loading error * occurs. */ MonoArray* mono_reflection_get_custom_attrs_by_type (MonoObject *obj, MonoClass *attr_klass) { MonoArray *result; MonoCustomAttrInfo *cinfo; cinfo = mono_reflection_get_custom_attrs_info (obj); if (cinfo) { if (attr_klass) result = mono_custom_attrs_construct_by_type (cinfo, attr_klass); else result = mono_custom_attrs_construct (cinfo); if (!cinfo->cached) mono_custom_attrs_free (cinfo); } else { if (mono_loader_get_last_error ()) return NULL; result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, 0); } return result; } /* * mono_reflection_get_custom_attrs: * @obj: a reflection object handle * * Return an array with all the custom attributes defined of the * reflection handle @obj. The objects are fully build. Return NULL if a loading error * occurs. */ MonoArray* mono_reflection_get_custom_attrs (MonoObject *obj) { return mono_reflection_get_custom_attrs_by_type (obj, NULL); } /* * mono_reflection_get_custom_attrs_data: * @obj: a reflection obj handle * * Returns an array of System.Reflection.CustomAttributeData, * which include information about attributes reflected on * types loaded using the Reflection Only methods */ MonoArray* mono_reflection_get_custom_attrs_data (MonoObject *obj) { MonoArray *result; MonoCustomAttrInfo *cinfo; cinfo = mono_reflection_get_custom_attrs_info (obj); if (cinfo) { result = mono_custom_attrs_data_construct (cinfo); if (!cinfo->cached) mono_custom_attrs_free (cinfo); } else result = mono_array_new (mono_domain_get (), mono_defaults.customattribute_data_class, 0); return result; } static MonoReflectionType* mono_reflection_type_get_underlying_system_type (MonoReflectionType* t) { static MonoMethod *method_get_underlying_system_type = NULL; MonoMethod *usertype_method; if (!method_get_underlying_system_type) method_get_underlying_system_type = mono_class_get_method_from_name (mono_defaults.systemtype_class, "get_UnderlyingSystemType", 0); usertype_method = 
mono_object_get_virtual_method ((MonoObject *) t, method_get_underlying_system_type); return (MonoReflectionType *) mono_runtime_invoke (usertype_method, t, NULL, NULL); } static gboolean is_corlib_type (MonoClass *class) { return class->image == mono_defaults.corlib; } #define check_corlib_type_cached(_class, _namespace, _name) do { \ static MonoClass *cached_class; \ if (cached_class) \ return cached_class == _class; \ if (is_corlib_type (_class) && !strcmp (_name, _class->name) && !strcmp (_namespace, _class->name_space)) { \ cached_class = _class; \ return TRUE; \ } \ return FALSE; \ } while (0) \ #ifndef DISABLE_REFLECTION_EMIT static gboolean is_sre_array (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "ArrayType"); } static gboolean is_sre_byref (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "ByRefType"); } static gboolean is_sre_pointer (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "PointerType"); } static gboolean is_sre_generic_instance (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection", "MonoGenericClass"); } static gboolean is_sre_type_builder (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "TypeBuilder"); } static gboolean is_sre_method_builder (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "MethodBuilder"); } static gboolean is_sre_ctor_builder (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "ConstructorBuilder"); } static gboolean is_sre_field_builder (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "FieldBuilder"); } static gboolean is_sre_method_on_tb_inst (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", "MethodOnTypeBuilderInst"); } static gboolean is_sre_ctor_on_tb_inst (MonoClass *class) { check_corlib_type_cached (class, "System.Reflection.Emit", 
"ConstructorOnTypeBuilderInst"); } MonoType* mono_reflection_type_get_handle (MonoReflectionType* ref) { MonoClass *class; if (!ref) return NULL; if (ref->type) return ref->type; if (is_usertype (ref)) { ref = mono_reflection_type_get_underlying_system_type (ref); if (ref == NULL || is_usertype (ref)) return NULL; if (ref->type) return ref->type; } class = mono_object_class (ref); if (is_sre_array (class)) { MonoType *res; MonoReflectionArrayType *sre_array = (MonoReflectionArrayType*)ref; MonoType *base = mono_reflection_type_get_handle (sre_array->element_type); g_assert (base); if (sre_array->rank == 0) //single dimentional array res = &mono_array_class_get (mono_class_from_mono_type (base), 1)->byval_arg; else res = &mono_bounded_array_class_get (mono_class_from_mono_type (base), sre_array->rank, TRUE)->byval_arg; sre_array->type.type = res; return res; } else if (is_sre_byref (class)) { MonoType *res; MonoReflectionDerivedType *sre_byref = (MonoReflectionDerivedType*)ref; MonoType *base = mono_reflection_type_get_handle (sre_byref->element_type); g_assert (base); res = &mono_class_from_mono_type (base)->this_arg; sre_byref->type.type = res; return res; } else if (is_sre_pointer (class)) { MonoType *res; MonoReflectionDerivedType *sre_pointer = (MonoReflectionDerivedType*)ref; MonoType *base = mono_reflection_type_get_handle (sre_pointer->element_type); g_assert (base); res = &mono_ptr_class_get (base)->byval_arg; sre_pointer->type.type = res; return res; } else if (is_sre_generic_instance (class)) { MonoType *res, **types; MonoReflectionGenericClass *gclass = (MonoReflectionGenericClass*)ref; int i, count; count = mono_array_length (gclass->type_arguments); types = g_new0 (MonoType*, count); for (i = 0; i < count; ++i) { MonoReflectionType *t = mono_array_get (gclass->type_arguments, gpointer, i); types [i] = mono_reflection_type_get_handle (t); if (!types[i]) { g_free (types); return NULL; } } res = mono_reflection_bind_generic_parameters 
(gclass->generic_type, count, types);
	g_free (types);
	g_assert (res);
	gclass->type.type = res;
	return res;
	}
	g_error ("Cannot handle corlib user type %s", mono_type_full_name (&mono_object_class(ref)->byval_arg));
	return NULL;
}

/*
 * mono_reflection_create_unmanaged_type:
 * @type: a managed System.Type-derived reflection object
 *
 * Forces resolution of the underlying MonoType handle for @type.  The
 * result is discarded; this exists purely for the side effects of
 * mono_reflection_type_get_handle ().
 */
void
mono_reflection_create_unmanaged_type (MonoReflectionType *type)
{
	mono_reflection_type_get_handle (type);
}

/*
 * mono_reflection_register_with_runtime:
 * @type: a managed reflection type object
 *
 * Resolves the MonoType handle for @type and registers the managed
 * object with the runtime.  For non-dynamic images the class supertypes
 * are set up; for dynamic images the (MonoType -> managed object)
 * mapping is recorded in the domain's type_hash so later lookups return
 * the same managed instance.  Raises ArgumentException if the handle
 * cannot be resolved (invalid user-type arguments in a generic
 * instantiation).  Takes the loader lock and then the domain lock,
 * in that order.
 */
void
mono_reflection_register_with_runtime (MonoReflectionType *type)
{
	MonoType *res = mono_reflection_type_get_handle (type);
	MonoDomain *domain = mono_object_domain ((MonoObject*)type);
	MonoClass *class;

	if (!res)
		mono_raise_exception (mono_get_exception_argument (NULL, "Invalid generic instantiation, one or more arguments are not proper user types"));

	class = mono_class_from_mono_type (res);
	mono_loader_lock (); /*same locking as mono_type_get_object*/
	mono_domain_lock (domain);

	if (!class->image->dynamic) {
		mono_class_setup_supertypes (class);
	} else {
		if (!domain->type_hash)
			domain->type_hash = mono_g_hash_table_new_type ((GHashFunc)mymono_metadata_type_hash, (GCompareFunc)mymono_metadata_type_equal, MONO_HASH_VALUE_GC);
		/* map the runtime type back to the managed reflection object */
		mono_g_hash_table_insert (domain->type_hash, res, type);
	}
	mono_domain_unlock (domain);
	mono_loader_unlock ();
}

/*
 * parameters_to_signature:
 * @image: image whose mempool owns the signature, or NULL for malloc'd
 *         memory (NOTE(review): inferred from the image_g_malloc0 call
 *         pattern used throughout this file — confirm)
 * @parameters: managed array of reflection type objects, may be NULL
 *
 * Builds a MonoMethodSignature whose params are resolved from
 * @parameters.  hasthis/ret are left for the caller to fill in.
 *
 * LOCKING: Assumes the loader lock is held.
 */
static MonoMethodSignature*
parameters_to_signature (MonoImage *image, MonoArray *parameters) {
	MonoMethodSignature *sig;
	int count, i;

	count = parameters? mono_array_length (parameters): 0;

	sig = image_g_malloc0 (image, MONO_SIZEOF_METHOD_SIGNATURE + sizeof (MonoType*) * count);
	sig->param_count = count;
	sig->sentinelpos = -1; /* FIXME */
	for (i = 0; i < count; ++i)
		sig->params [i] = mono_type_array_get_and_resolve (parameters, i);
	return sig;
}

/*
 * ctor_builder_to_signature:
 *
 * Builds the signature of a ConstructorBuilder: parameters from the
 * builder, void return type, hasthis unless the ctor is static.
 *
 * LOCKING: Assumes the loader lock is held.
 */
static MonoMethodSignature*
ctor_builder_to_signature (MonoImage *image, MonoReflectionCtorBuilder *ctor) {
	MonoMethodSignature *sig;

	sig = parameters_to_signature (image, ctor->parameters);
	sig->hasthis = ctor->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1;
	sig->ret = &mono_defaults.void_class->byval_arg;
	return sig;
}

/*
 * method_builder_to_signature:
 *
 * Builds the signature of a MethodBuilder, including the generic
 * parameter count; a missing rtype means void.
 *
 * LOCKING: Assumes the loader lock is held.
 */
static MonoMethodSignature*
method_builder_to_signature (MonoImage *image, MonoReflectionMethodBuilder *method) {
	MonoMethodSignature *sig;

	sig = parameters_to_signature (image, method->parameters);
	sig->hasthis = method->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1;
	sig->ret = method->rtype? mono_reflection_type_get_handle ((MonoReflectionType*)method->rtype): &mono_defaults.void_class->byval_arg;
	sig->generic_param_count = method->generic_params ? mono_array_length (method->generic_params) : 0;
	return sig;
}

/*
 * dynamic_method_to_signature:
 *
 * Builds the signature of a DynamicMethod.  The signature is malloc'd
 * (NULL image) because dynamic methods are not mempool-backed; dynamic
 * methods never carry generic parameters.
 */
static MonoMethodSignature*
dynamic_method_to_signature (MonoReflectionDynamicMethod *method) {
	MonoMethodSignature *sig;

	sig = parameters_to_signature (NULL, method->parameters);
	sig->hasthis = method->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1;
	sig->ret = method->rtype? mono_reflection_type_get_handle (method->rtype): &mono_defaults.void_class->byval_arg;
	sig->generic_param_count = 0;
	return sig;
}

/*
 * get_prop_name_and_type:
 *
 * Extracts the name (newly allocated; caller frees) and the MonoType of
 * a property, accepting either a PropertyBuilder or a runtime
 * MonoProperty.  For a runtime property without a getter, the type is
 * taken from the last parameter of the setter.
 */
static void
get_prop_name_and_type (MonoObject *prop, char **name, MonoType **type)
{
	MonoClass *klass = mono_object_class (prop);
	if (strcmp (klass->name, "PropertyBuilder") == 0) {
		MonoReflectionPropertyBuilder *pb = (MonoReflectionPropertyBuilder *)prop;
		*name = mono_string_to_utf8 (pb->name);
		*type = mono_reflection_type_get_handle ((MonoReflectionType*)pb->type);
	} else {
		MonoReflectionProperty *p = (MonoReflectionProperty *)prop;
		*name = g_strdup (p->property->name);
		if (p->property->get)
			*type = mono_method_signature (p->property->get)->ret;
		else
			*type = mono_method_signature (p->property->set)->params [mono_method_signature (p->property->set)->param_count - 1];
	}
}

/*
 * get_field_name_and_type:
 *
 * Same as get_prop_name_and_type () but for fields: accepts either a
 * FieldBuilder or a runtime MonoField.  *name is newly allocated and
 * must be freed by the caller.
 */
static void
get_field_name_and_type (MonoObject *field, char **name, MonoType **type)
{
	MonoClass *klass = mono_object_class (field);
	if (strcmp (klass->name, "FieldBuilder") == 0) {
		MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)field;
		*name = mono_string_to_utf8 (fb->name);
		*type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
	} else {
		MonoReflectionField *f = (MonoReflectionField *)field;
		*name = g_strdup (mono_field_get_name (f->field));
		*type = f->field->type;
	}
}

#else /* DISABLE_REFLECTION_EMIT */

/* Stub kept so callers link when reflection-emit is compiled out. */
void
mono_reflection_register_with_runtime (MonoReflectionType *type)
{
	/* This is empty */
}

static gboolean
is_sre_type_builder (MonoClass *class)
{
	return FALSE;
}

static gboolean
is_sre_generic_instance (MonoClass *class)
{
	return FALSE;
}

static void
init_type_builder_generics (MonoObject *type)
{
}

#endif /* !DISABLE_REFLECTION_EMIT */

/*
 * Predicates for the System.Reflection runtime wrapper classes.
 * NOTE(review): check_corlib_type_cached appears to be a macro that
 * expands to a return statement (there is no explicit return here) —
 * confirm against its definition earlier in the file.
 */
static gboolean
is_sr_mono_field (MonoClass *class)
{
	check_corlib_type_cached (class, "System.Reflection", "MonoField");
}

static gboolean
is_sr_mono_property (MonoClass *class)
{
	check_corlib_type_cached (class, "System.Reflection", "MonoProperty");
}

static gboolean
is_sr_mono_method (MonoClass *class)
{
	check_corlib_type_cached (class, "System.Reflection", "MonoMethod");
}

static gboolean
is_sr_mono_cmethod (MonoClass *class)
{
	check_corlib_type_cached (class, "System.Reflection", "MonoCMethod");
}

static gboolean
is_sr_mono_generic_method (MonoClass *class)
{
	check_corlib_type_cached (class, "System.Reflection", "MonoGenericMethod");
}

static gboolean
is_sr_mono_generic_cmethod (MonoClass *class)
{
	check_corlib_type_cached (class, "System.Reflection", "MonoGenericCMethod");
}

/* TRUE if @class is any of the runtime method/constructor wrappers. */
gboolean
mono_class_is_reflection_method_or_constructor (MonoClass *class)
{
	return is_sr_mono_method (class) || is_sr_mono_cmethod (class) || is_sr_mono_generic_method (class) || is_sr_mono_generic_cmethod (class);
}

/*
 * is_usertype:
 *
 * TRUE if @ref is a user-defined System.Type subclass, i.e. anything
 * not from corlib, plus corlib's own TypeDelegator.
 */
static gboolean
is_usertype (MonoReflectionType *ref)
{
	MonoClass *class = mono_object_class (ref);
	return class->image != mono_defaults.corlib || strcmp ("TypeDelegator", class->name) == 0;
}

/*
 * mono_reflection_type_resolve_user_types:
 *
 * Maps a user-defined System.Type subclass to its underlying system
 * type (one level); raises NotSupportedException if the result is still
 * a user type.  Already-resolved or NULL types pass through unchanged.
 */
static MonoReflectionType*
mono_reflection_type_resolve_user_types (MonoReflectionType *type)
{
	if (!type || type->type)
		return type;

	if (is_usertype (type)) {
		type =
mono_reflection_type_get_underlying_system_type (type);
		if (is_usertype (type))
			mono_raise_exception (mono_get_exception_not_supported ("User defined subclasses of System.Type are not yet supported22"));
	}
	return type;
}

/*
 * Encode a value in a custom attribute stream of bytes.
 * The value to encode is either supplied as an object in argument val
 * (valuetypes are boxed), or as a pointer to the data in the
 * argument argval.
 * @type represents the type of the value
 * @buffer is the start of the buffer
 * @p the current position in the buffer
 * @buflen contains the size of the buffer and is used to return the new buffer size
 * if this needs to be realloced.
 * @retbuffer and @retp return the start and the position of the buffer
 *
 * NOTE(review): the buffer may be g_realloc'd at several points, so the
 * caller must always continue from *retbuffer/*retp, never from its own
 * stale buffer/p values.  The "+ 10" headroom checks assume each scalar
 * write fits in 10 bytes; string/type cases re-check with the payload
 * length added.
 */
static void
encode_cattr_value (MonoAssembly *assembly, char *buffer, char *p, char **retbuffer, char **retp, guint32 *buflen, MonoType *type, MonoObject *arg, char *argval)
{
	MonoTypeEnum simple_type;

	/* ensure headroom for the largest fixed-size encoding */
	if ((p-buffer) + 10 >= *buflen) {
		char *newbuf;
		*buflen *= 2;
		newbuf = g_realloc (buffer, *buflen);
		p = newbuf + (p-buffer);
		buffer = newbuf;
	}
	if (!argval)
		argval = ((char*)arg + sizeof (MonoObject)); /* unbox: data follows the object header */
	simple_type = type->type;
handle_enum:
	switch (simple_type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_U1:
	case MONO_TYPE_I1:
		*p++ = *argval;
		break;
	case MONO_TYPE_CHAR:
	case MONO_TYPE_U2:
	case MONO_TYPE_I2:
		/* blob values are little-endian; swap_with_size handles byte order */
		swap_with_size (p, argval, 2, 1);
		p += 2;
		break;
	case MONO_TYPE_U4:
	case MONO_TYPE_I4:
	case MONO_TYPE_R4:
		swap_with_size (p, argval, 4, 1);
		p += 4;
		break;
	case MONO_TYPE_R8:
#if defined(ARM_FPU_FPA) && G_BYTE_ORDER == G_LITTLE_ENDIAN
		/* old ARM FPA stores doubles with the two words swapped */
		p [0] = argval [4];
		p [1] = argval [5];
		p [2] = argval [6];
		p [3] = argval [7];
		p [4] = argval [0];
		p [5] = argval [1];
		p [6] = argval [2];
		p [7] = argval [3];
#else
		swap_with_size (p, argval, 8, 1);
#endif
		p += 8;
		break;
	case MONO_TYPE_U8:
	case MONO_TYPE_I8:
		swap_with_size (p, argval, 8, 1);
		p += 8;
		break;
	case MONO_TYPE_VALUETYPE:
		if (type->data.klass->enumtype) {
			/* enums encode as their underlying integral type */
			simple_type = mono_class_enum_basetype (type->data.klass)->type;
			goto handle_enum;
		} else {
			g_warning ("generic valutype %s not handled in custom attr value decoding", type->data.klass->name);
		}
		break;
	case MONO_TYPE_STRING: {
		char *str;
		guint32 slen;
		if (!arg) {
			*p++ = 0xFF; /* null string marker */
			break;
		}
		str = mono_string_to_utf8 ((MonoString*)arg);
		slen = strlen (str);
		/* grow the buffer to hold the length prefix plus the UTF-8 payload */
		if ((p-buffer) + 10 + slen >= *buflen) {
			char *newbuf;
			*buflen *= 2;
			*buflen += slen;
			newbuf = g_realloc (buffer, *buflen);
			p = newbuf + (p-buffer);
			buffer = newbuf;
		}
		mono_metadata_encode_value (slen, p, &p);
		memcpy (p, str, slen);
		p += slen;
		g_free (str);
		break;
	}
	case MONO_TYPE_CLASS: {
		char *str;
		guint32 slen;
		if (!arg) {
			*p++ = 0xFF; /* null type marker */
			break;
		}
handle_type:
		/* types are encoded as their assembly-qualified name string */
		str = type_get_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)arg), NULL);
		slen = strlen (str);
		if ((p-buffer) + 10 + slen >= *buflen) {
			char *newbuf;
			*buflen *= 2;
			*buflen += slen;
			newbuf = g_realloc (buffer, *buflen);
			p = newbuf + (p-buffer);
			buffer = newbuf;
		}
		mono_metadata_encode_value (slen, p, &p);
		memcpy (p, str, slen);
		p += slen;
		g_free (str);
		break;
	}
	case MONO_TYPE_SZARRAY: {
		int len, i;
		MonoClass *eclass, *arg_eclass;

		if (!arg) {
			/* null array encodes as length 0xffffffff */
			*p++ = 0xff; *p++ = 0xff; *p++ = 0xff; *p++ = 0xff;
			break;
		}
		len = mono_array_length ((MonoArray*)arg);
		*p++ = len & 0xff;
		*p++ = (len >> 8) & 0xff;
		*p++ = (len >> 16) & 0xff;
		*p++ = (len >> 24) & 0xff;
		/* publish current position before recursing: the recursive calls
		 * may realloc and will update buffer/p through the out params */
		*retp = p;
		*retbuffer = buffer;
		eclass = type->data.klass;
		arg_eclass = mono_object_class (arg)->element_class;

		if (!eclass) {
			/* Happens when we are called from the MONO_TYPE_OBJECT case below */
			eclass = mono_defaults.object_class;
		}
		if (eclass == mono_defaults.object_class && arg_eclass->valuetype) {
			char *elptr = mono_array_addr ((MonoArray*)arg, char, 0);
			int elsize = mono_class_array_element_size (arg_eclass);
			for (i = 0; i < len; ++i) {
				encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &arg_eclass->byval_arg, NULL, elptr);
				elptr += elsize;
			}
		} else if (eclass->valuetype && arg_eclass->valuetype) {
			char *elptr = mono_array_addr ((MonoArray*)arg, char, 0);
			int elsize = mono_class_array_element_size (eclass);
			for (i = 0; i < len; ++i) {
				encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &eclass->byval_arg, NULL, elptr);
				elptr += elsize;
			}
		} else {
			for (i = 0; i < len; ++i) {
				encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &eclass->byval_arg, mono_array_get ((MonoArray*)arg, MonoObject*, i), NULL);
			}
		}
		break;
	}
	case MONO_TYPE_OBJECT: {
		MonoClass *klass;
		char *str;
		guint32 slen;

		/*
		 * The parameter type is 'object' but the type of the actual
		 * argument is not. So we have to add type information to the blob
		 * too. This is completely undocumented in the spec.
		 */

		if (arg == NULL) {
			*p++ = MONO_TYPE_STRING;	// It's same hack as MS uses
			*p++ = 0xFF;
			break;
		}

		klass = mono_object_class (arg);

		if (mono_object_isinst (arg, mono_defaults.systemtype_class)) {
			*p++ = 0x50; /* boxed System.Type */
			goto handle_type;
		} else if (klass->enumtype) {
			*p++ = 0x55; /* enum: type name follows, then the value */
		} else if (klass == mono_defaults.string_class) {
			simple_type = MONO_TYPE_STRING;
			*p++ = 0x0E;
			goto handle_enum;
		} else if (klass->rank == 1) {
			*p++ = 0x1D; /* SZARRAY */
			if (klass->element_class->byval_arg.type == MONO_TYPE_OBJECT)
				/* See Partition II, Appendix B3 */
				*p++ = 0x51;
			else
				*p++ = klass->element_class->byval_arg.type;
			encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &klass->byval_arg, arg, NULL);
			break;
		} else if (klass->byval_arg.type >= MONO_TYPE_BOOLEAN && klass->byval_arg.type <= MONO_TYPE_R8) {
			/* boxed primitive: element type tag then the value */
			*p++ = simple_type = klass->byval_arg.type;
			goto handle_enum;
		} else {
			g_error ("unhandled type in custom attr");
		}
		/* enum path: write the qualified type name, then loop to encode
		 * the value as the enum's base type */
		str = type_get_qualified_name (mono_class_get_type(klass), NULL);
		slen = strlen (str);
		if ((p-buffer) + 10 + slen >= *buflen) {
			char *newbuf;
			*buflen *= 2;
			*buflen += slen;
			newbuf = g_realloc (buffer, *buflen);
			p = newbuf + (p-buffer);
			buffer = newbuf;
		}
		mono_metadata_encode_value (slen, p, &p);
		memcpy (p, str, slen);
		p += slen;
		g_free (str);
		simple_type = mono_class_enum_basetype (klass)->type;
		goto handle_enum;
	}
	default:
		g_error ("type 0x%02x not yet supported in custom attr encoder", simple_type);
	}
	*retp = p;
	*retbuffer = buffer;
}

/*
 * encode_field_or_prop_type:
 *
 * Writes the FieldOrPropType byte(s) for a named argument into @p.
 * NOTE(review): unlike encode_cattr_value (), this does NOT grow the
 * buffer — the caller (encode_named_val) must have reserved enough
 * space beforehand.
 */
static void
encode_field_or_prop_type (MonoType *type, char *p, char **retp)
{
	if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype) {
		char *str = type_get_qualified_name (type, NULL);
		int slen = strlen (str);

		*p++ = 0x55; /* enum: followed by the qualified type name */
		/*
		 * This seems to be optional...
		 * *p++ = 0x80;
		 */
		mono_metadata_encode_value (slen, p, &p);
		memcpy (p, str, slen);
		p += slen;
		g_free (str);
	} else if (type->type == MONO_TYPE_OBJECT) {
		*p++ = 0x51; /* boxed object */
	} else if (type->type == MONO_TYPE_CLASS) {
		/* it should be a type: encode_cattr_value () has the check */
		*p++ = 0x50;
	} else {
		mono_metadata_encode_value (type->type, p, &p);
		if (type->type == MONO_TYPE_SZARRAY)
			/* See the examples in Partition VI, Annex B */
			encode_field_or_prop_type (&type->data.klass->byval_arg, p, &p);
	}
	*retp = p;
}

#ifndef DISABLE_REFLECTION_EMIT
/*
 * encode_named_val:
 *
 * Encodes one named argument (field or property) of a custom attribute:
 * pre-grows the buffer for the type tag and the name, writes the
 * FieldOrPropType, the length-prefixed name, and finally the value via
 * encode_cattr_value ().  Updated buffer start/position are returned in
 * @retbuffer/@retp.
 */
static void
encode_named_val (MonoReflectionAssembly *assembly, char *buffer, char *p, char **retbuffer, char **retp, guint32 *buflen, MonoType *type, char *name, MonoObject *value)
{
	int len;

	/* Preallocate a large enough buffer */
	if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype) {
		char *str = type_get_qualified_name (type, NULL);
		len = strlen (str);
		g_free (str);
	} else if (type->type == MONO_TYPE_SZARRAY && type->data.klass->enumtype) {
		char *str = type_get_qualified_name (&type->data.klass->byval_arg, NULL);
		len = strlen (str);
		g_free (str);
	} else {
		len = 0;
	}

	len += strlen (name);
	if ((p-buffer) + 20 + len >= *buflen) {
		char *newbuf;
		*buflen *= 2;
		*buflen += len;
		newbuf = g_realloc (buffer, *buflen);
		p = newbuf + (p-buffer);
		buffer = newbuf;
	}

	encode_field_or_prop_type (type, p, &p);

	len = strlen (name);
	mono_metadata_encode_value (len, p, &p);
	memcpy (p, name, len);
	p += len;
	encode_cattr_value (assembly->assembly, buffer, p, &buffer, &p, buflen, type, value, NULL);
	*retp = p;
	*retbuffer = buffer;
}

/*
 * 
mono_reflection_get_custom_attrs_blob:
 * @ctor: custom attribute constructor
 * @ctorArgs: arguments of the constructor
 * @properties: managed array of named-property objects, may be NULL
 * @propValues: values matching @properties
 * @fields: managed array of named-field objects, may be NULL
 * @fieldValues: values matching @fields
 *
 * Creates the blob of data that needs to be saved in the metadata and that represents
 * the custom attribute described by @ctor, @ctorArgs etc.
 * Returns: a Byte array representing the blob of data.
 */
MonoArray*
mono_reflection_get_custom_attrs_blob (MonoReflectionAssembly *assembly, MonoObject *ctor, MonoArray *ctorArgs, MonoArray *properties, MonoArray *propValues, MonoArray *fields, MonoArray* fieldValues)
{
	MonoArray *result;
	MonoMethodSignature *sig;
	MonoObject *arg;
	char *buffer, *p;
	guint32 buflen, i;

	MONO_ARCH_SAVE_REGS;

	/* non-zero strcmp => @ctor is a ConstructorBuilder, not a runtime MonoCMethod */
	if (strcmp (ctor->vtable->klass->name, "MonoCMethod")) {
		/* sig is freed later so allocate it in the heap */
		sig = ctor_builder_to_signature (NULL, (MonoReflectionCtorBuilder*)ctor);
	} else {
		sig = mono_method_signature (((MonoReflectionMethod*)ctor)->method);
	}

	g_assert (mono_array_length (ctorArgs) == sig->param_count);
	buflen = 256;
	p = buffer = g_malloc (buflen);
	/* write the prolog */
	*p++ = 1;
	*p++ = 0;
	/* fixed (positional) arguments */
	for (i = 0; i < sig->param_count; ++i) {
		arg = mono_array_get (ctorArgs, MonoObject*, i);
		encode_cattr_value (assembly->assembly, buffer, p, &buffer, &p, &buflen, sig->params [i], arg, NULL);
	}
	/* NumNamed: 16-bit little-endian count of named args */
	i = 0;
	if (properties)
		i += mono_array_length (properties);
	if (fields)
		i += mono_array_length (fields);
	*p++ = i & 0xff;
	*p++ = (i >> 8) & 0xff;
	if (properties) {
		MonoObject *prop;
		for (i = 0; i < mono_array_length (properties); ++i) {
			MonoType *ptype;
			char *pname;

			prop = mono_array_get (properties, gpointer, i);
			get_prop_name_and_type (prop, &pname, &ptype);
			*p++ = 0x54; /* PROPERTY signature */
			encode_named_val (assembly, buffer, p, &buffer, &p, &buflen, ptype, pname, (MonoObject*)mono_array_get (propValues, gpointer, i));
			g_free (pname);
		}
	}

	if (fields) {
		MonoObject *field;
		for (i = 0; i < mono_array_length (fields); ++i) {
			MonoType *ftype;
			char *fname;

			field = mono_array_get (fields, gpointer, i);
			get_field_name_and_type (field, &fname, &ftype);
			*p++ = 0x53; /* FIELD signature */
			encode_named_val (assembly, buffer, p, &buffer, &p, &buflen, ftype, fname, (MonoObject*)mono_array_get (fieldValues, gpointer, i));
			g_free (fname);
		}
	}

	/* NOTE(review): overflow is only asserted after the fact; the
	 * encode_* helpers are responsible for growing the buffer. */
	g_assert (p - buffer <= buflen);
	buflen = p - buffer;
	result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen);
	p = mono_array_addr (result, char, 0);
	memcpy (p, buffer, buflen);
	g_free (buffer);
	/* heap-allocated sig (builder path) must be freed; runtime sigs are shared */
	if (strcmp (ctor->vtable->klass->name, "MonoCMethod"))
		g_free (sig);
	return result;
}

/*
 * mono_reflection_setup_internal_class:
 * @tb: a TypeBuilder object
 *
 * Creates a MonoClass that represents the TypeBuilder.
 * This is a trick that lets us simplify a lot of reflection code
 * (and will allow us to support Build and Run assemblies easier).
 */
void
mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb)
{
	MonoError error;
	MonoClass *klass, *parent;

	MONO_ARCH_SAVE_REGS;

	RESOLVE_TYPE (tb->parent);

	mono_loader_lock ();

	if (tb->parent) {
		/* check so we can compile corlib correctly */
		if (strcmp (mono_object_class (tb->parent)->name, "TypeBuilder") == 0) {
			/* mono_class_setup_mono_type () guarantees type->data.klass is valid */
			parent = mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent)->data.klass;
		} else {
			parent = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent));
		}
	} else {
		parent = NULL;
	}

	/* the type has already been created: it means we just have to change the parent */
	if (tb->type.type) {
		klass = mono_class_from_mono_type (tb->type.type);
		klass->parent = NULL;
		/* fool mono_class_setup_parent */
		klass->supertypes = NULL;
		mono_class_setup_parent (klass, parent);
		mono_class_setup_mono_type (klass);
		mono_loader_unlock ();
		return;
	}

	klass = mono_image_alloc0 (&tb->module->dynamic_image->image, sizeof (MonoClass));

	klass->image = &tb->module->dynamic_image->image;

	klass->inited = 1; /* we lie to the runtime */
	klass->name = mono_string_to_utf8_image (klass->image, tb->name, &error);
	if (!mono_error_ok (&error))
		goto failure;
	klass->name_space = mono_string_to_utf8_image (klass->image, tb->nspace, &error);
	if (!mono_error_ok (&error))
		goto failure;
	klass->type_token = MONO_TOKEN_TYPE_DEF | tb->table_idx;
	klass->flags = tb->attrs;

	mono_profiler_class_event (klass, MONO_PROFILE_START_LOAD);

	klass->element_class = klass;

	if (mono_class_get_ref_info (klass) == NULL) {

		mono_class_set_ref_info (klass, tb);

		/* Put into cache so mono_class_get () will find it.
		Skip nested types as those should not be available on the global scope. */
		if (!tb->nesting_type) {
			mono_image_add_to_name_cache (klass->image, klass->name_space, klass->name, tb->table_idx);
		} else {
			klass->image->reflection_info_unregister_classes =
				g_slist_prepend (klass->image->reflection_info_unregister_classes, klass);
		}
	} else {
		g_assert (mono_class_get_ref_info (klass) == tb);
	}

	mono_g_hash_table_insert (tb->module->dynamic_image->tokens,
		GUINT_TO_POINTER (MONO_TOKEN_TYPE_DEF | tb->table_idx), tb);

	if (parent != NULL) {
		mono_class_setup_parent (klass, parent);
	} else if (strcmp (klass->name, "Object") == 0 && strcmp (klass->name_space, "System") == 0) {
		const char *old_n = klass->name;
		/* trick to get relative numbering right when compiling corlib */
		klass->name = "BuildingObject";
		mono_class_setup_parent (klass, mono_defaults.object_class);
		klass->name = old_n;
	}

	if ((!strcmp (klass->name, "ValueType") && !strcmp (klass->name_space, "System")) ||
			(!strcmp (klass->name, "Object") && !strcmp (klass->name_space, "System")) ||
			(!strcmp (klass->name, "Enum") && !strcmp (klass->name_space, "System"))) {
		klass->instance_size = sizeof (MonoObject);
		klass->size_inited = 1;
		mono_class_setup_vtable_general (klass, NULL, 0, NULL);
	}

	mono_class_setup_mono_type (klass);

	mono_class_setup_supertypes (klass);

	/*
	 * FIXME: handle interfaces.
	 */

	tb->type.type = &klass->byval_arg;

	if (tb->nesting_type) {
		g_assert (tb->nesting_type->type);
		klass->nested_in = mono_class_from_mono_type (mono_reflection_type_get_handle (tb->nesting_type));
	}

	/*g_print ("setup %s as %s (%p)\n", klass->name, ((MonoObject*)tb)->vtable->klass->name, tb);*/

	mono_profiler_class_loaded (klass, MONO_PROFILE_OK);

	mono_loader_unlock ();
	return;

failure:
	mono_loader_unlock ();
	mono_error_raise_exception (&error);
}

/*
 * mono_reflection_setup_generic_class:
 * @tb: a TypeBuilder object
 *
 * Setup the generic class before adding the first generic parameter.
 * (Intentionally empty: all the work happens in
 * mono_reflection_create_generic_class () once every parameter is known.)
 */
void
mono_reflection_setup_generic_class (MonoReflectionTypeBuilder *tb)
{
}

/*
 * mono_reflection_create_generic_class:
 * @tb: a TypeBuilder object
 *
 * Creates the generic class after all generic parameters have been added.
 * Allocates the MonoGenericContainer, copies each builder-declared
 * generic parameter into it, and installs the shared generic instance
 * context.  No-op if the container already exists or there are no
 * parameters.
 */
void
mono_reflection_create_generic_class (MonoReflectionTypeBuilder *tb)
{
	MonoClass *klass;
	int count, i;

	MONO_ARCH_SAVE_REGS;

	klass = mono_class_from_mono_type (tb->type.type);

	count = tb->generic_params ? mono_array_length (tb->generic_params) : 0;

	if (klass->generic_container || (count == 0))
		return;

	g_assert (tb->generic_container && (tb->generic_container->owner.klass == klass));

	klass->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer));

	klass->generic_container->owner.klass = klass;
	klass->generic_container->type_argc = count;
	klass->generic_container->type_params = mono_image_alloc0 (klass->image, sizeof (MonoGenericParamFull) * count);

	klass->is_generic = 1;

	for (i = 0; i < count; i++) {
		MonoReflectionGenericParam *gparam = mono_array_get (tb->generic_params, gpointer, i);
		MonoGenericParamFull *param = (MonoGenericParamFull *) mono_reflection_type_get_handle ((MonoReflectionType*)gparam)->data.generic_param;
		klass->generic_container->type_params [i] = *param;
		/* Make sure we are a different type instance */
		klass->generic_container->type_params [i].param.owner = klass->generic_container;
		klass->generic_container->type_params [i].info.pklass = NULL;
		klass->generic_container->type_params [i].info.flags = gparam->attrs;

		g_assert (klass->generic_container->type_params [i].param.owner);
	}

	klass->generic_container->context.class_inst = mono_get_shared_generic_inst (klass->generic_container);
}

/*
 * mono_reflection_create_internal_class:
 * @tb: a TypeBuilder object
 *
 * Actually create the MonoClass that is associated with the TypeBuilder.
 */
void
mono_reflection_create_internal_class (MonoReflectionTypeBuilder *tb)
{
	MonoClass *klass;

	MONO_ARCH_SAVE_REGS;

	klass = mono_class_from_mono_type (tb->type.type);

	mono_loader_lock ();
	if (klass->enumtype && mono_class_enum_basetype (klass) == NULL) {
		/* An enum whose base type is not yet known: derive it from the
		 * first (value__) field declared on the builder. */
		MonoReflectionFieldBuilder *fb;
		MonoClass *ec;
		MonoType *enum_basetype;

		g_assert (tb->fields != NULL);
		g_assert (mono_array_length (tb->fields) >= 1);

		fb = mono_array_get (tb->fields, MonoReflectionFieldBuilder*, 0);

		if (!mono_type_is_valid_enum_basetype (mono_reflection_type_get_handle ((MonoReflectionType*)fb->type))) {
			mono_loader_unlock ();
			return;
		}

		enum_basetype = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
		klass->element_class = mono_class_from_mono_type (enum_basetype);
		/* NOTE(review): this retry calls the exact same function with the
		 * same argument — it can only matter if the first call has side
		 * effects that make a second attempt succeed; looks redundant. */
		if (!klass->element_class)
			klass->element_class = mono_class_from_mono_type (enum_basetype);

		/*
		 * get the element_class from the current corlib.
		 */
		ec = default_class_from_mono_type (enum_basetype);
		klass->instance_size = ec->instance_size;
		klass->size_inited = 1;
		/*
		 * this is almost safe to do with enums and it's needed to be able
		 * to create objects of the enum type (for use in SetConstant).
		 */
		/* FIXME: Does this mean enums can't have method overrides ? */
		mono_class_setup_vtable_general (klass, NULL, 0, NULL);
	}
	mono_loader_unlock ();
}

/*
 * mono_marshal_spec_from_builder:
 *
 * Converts a managed UnmanagedMarshal/MarshalAs description into a
 * runtime MonoMarshalSpec allocated from @image's mempool (or the heap
 * when @image is NULL).  Only LPARRAY, BYVALTSTR/BYVALARRAY and CUSTOM
 * carry extra data; everything else is just the native type tag.
 */
static MonoMarshalSpec*
mono_marshal_spec_from_builder (MonoImage *image, MonoAssembly *assembly,
								MonoReflectionMarshal *minfo)
{
	MonoMarshalSpec *res;

	res = image_g_new0 (image, MonoMarshalSpec, 1);
	res->native = minfo->type;

	switch (minfo->type) {
	case MONO_NATIVE_LPARRAY:
		res->data.array_data.elem_type = minfo->eltype;
		if (minfo->has_size) {
			res->data.array_data.param_num = minfo->param_num;
			res->data.array_data.num_elem = minfo->count;
			/* elem_mult 0 means "fixed size only", 1 means size comes
			 * from the parameter at param_num */
			res->data.array_data.elem_mult = minfo->param_num == -1 ? 0 : 1;
		}
		else {
			res->data.array_data.param_num = -1;
			res->data.array_data.num_elem = -1;
			res->data.array_data.elem_mult = -1;
		}
		break;

	case MONO_NATIVE_BYVALTSTR:
	case MONO_NATIVE_BYVALARRAY:
		res->data.array_data.num_elem = minfo->count;
		break;

	case MONO_NATIVE_CUSTOM:
		if (minfo->marshaltyperef)
			res->data.custom_data.custom_name =
				type_get_fully_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)minfo->marshaltyperef));
		if (minfo->mcookie)
			res->data.custom_data.cookie = mono_string_to_utf8 (minfo->mcookie);
		break;

	default:
		break;
	}

	return res;
}
#endif /* !DISABLE_REFLECTION_EMIT */

/*
 * mono_reflection_marshal_from_marshal_spec:
 *
 * Inverse of mono_marshal_spec_from_builder (): builds a managed
 * UnmanagedMarshal object in @domain describing @spec.  The
 * UnmanagedMarshal class object is cached in a function-local static
 * after the first lookup.
 */
MonoReflectionMarshal*
mono_reflection_marshal_from_marshal_spec (MonoDomain *domain, MonoClass *klass,
										   MonoMarshalSpec *spec)
{
	static MonoClass *System_Reflection_Emit_UnmanagedMarshalClass;
	MonoReflectionMarshal *minfo;
	MonoType *mtype;

	if (!System_Reflection_Emit_UnmanagedMarshalClass) {
		System_Reflection_Emit_UnmanagedMarshalClass = mono_class_from_name (
		   mono_defaults.corlib, "System.Reflection.Emit", "UnmanagedMarshal");
		g_assert (System_Reflection_Emit_UnmanagedMarshalClass);
	}

	minfo = (MonoReflectionMarshal*)mono_object_new (domain, System_Reflection_Emit_UnmanagedMarshalClass);
	minfo->type = spec->native;

	switch (minfo->type) {
	case MONO_NATIVE_LPARRAY:
		minfo->eltype = spec->data.array_data.elem_type;
		minfo->count = spec->data.array_data.num_elem;
		minfo->param_num = spec->data.array_data.param_num;
		break;

	case MONO_NATIVE_BYVALTSTR:
	case MONO_NATIVE_BYVALARRAY:
		minfo->count = spec->data.array_data.num_elem;
		break;

	case MONO_NATIVE_CUSTOM:
		if (spec->data.custom_data.custom_name) {
			mtype = mono_reflection_type_from_name (spec->data.custom_data.custom_name, klass->image);
			if (mtype)
				/* MONO_OBJECT_SETREF: reference-field store (presumably
				 * the GC write barrier — per Mono conventions) */
				MONO_OBJECT_SETREF (minfo, marshaltyperef, mono_type_get_object (domain, mtype));

			MONO_OBJECT_SETREF (minfo, marshaltype, mono_string_new (domain, spec->data.custom_data.custom_name));
		}
		if (spec->data.custom_data.cookie)
			MONO_OBJECT_SETREF (minfo, mcookie, mono_string_new (domain, spec->data.custom_data.cookie));
		break;

	default:
		break;
	}

	return minfo;
}

#ifndef DISABLE_REFLECTION_EMIT

/*
 * reflection_methodbuilder_to_mono_method:
 * @klass: owning class
 * @rmb: normalized builder data (from a Method/Ctor/DynamicMethod builder)
 * @sig: the method signature (ownership passes to the created method)
 *
 * Materializes a runtime MonoMethod from builder data: allocates the
 * method (pinvoke or wrapper flavor), fills flags/name/signature,
 * builds the IL header with locals and exception clauses, wires up
 * generic containers, dynamic-method token refs, parameter names,
 * defaults, custom attributes and marshalling specs.
 *
 * LOCKING: takes the loader lock for the duration of the construction.
 */
static MonoMethod*
reflection_methodbuilder_to_mono_method (MonoClass *klass,
					 ReflectionMethodBuilder *rmb,
					 MonoMethodSignature *sig)
{
	MonoError error;
	MonoMethod *m;
	MonoMethodWrapper *wrapperm;
	MonoMarshalSpec **specs;
	MonoReflectionMethodAux *method_aux;
	MonoImage *image;
	gboolean dynamic;
	int i;

	mono_error_init (&error);
	/*
	 * Methods created using a MethodBuilder should have their memory allocated
	 * inside the image mempool, while dynamic methods should have their memory
	 * malloc'd.
	 */
	dynamic = rmb->refs != NULL;
	image = dynamic ? NULL : klass->image;

	if (!dynamic)
		g_assert (!klass->generic_class);

	mono_loader_lock ();

	if ((rmb->attrs & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
			(rmb->iattrs & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL))
		m = (MonoMethod *)image_g_new0 (image, MonoMethodPInvoke, 1);
	else
		m = (MonoMethod *)image_g_new0 (image, MonoMethodWrapper, 1);

	wrapperm = (MonoMethodWrapper*)m;

	m->dynamic = dynamic;
	m->slot = -1;
	m->flags = rmb->attrs;
	m->iflags = rmb->iattrs;
	m->name = mono_string_to_utf8_image (image, rmb->name, &error);
	g_assert (mono_error_ok (&error));
	m->klass = klass;
	m->signature = sig;
	m->sre_method = TRUE;
	m->skip_visibility = rmb->skip_visibility;
	if (rmb->table_idx)
		m->token = MONO_TOKEN_METHOD_DEF | (*rmb->table_idx);

	if (m->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) {
		if (klass == mono_defaults.string_class && !strcmp (m->name, ".ctor"))
			m->string_ctor = 1;

		m->signature->pinvoke = 1;
	} else if (m->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) {
		/* P/Invoke: record dll/entrypoint in the aux info and return early */
		m->signature->pinvoke = 1;

		method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1);

		method_aux->dllentry = rmb->dllentry ? mono_string_to_utf8_image (image, rmb->dllentry, &error) : image_strdup (image, m->name);
		g_assert (mono_error_ok (&error));
		method_aux->dll = mono_string_to_utf8_image (image, rmb->dll, &error);
		g_assert (mono_error_ok (&error));

		((MonoMethodPInvoke*)m)->piflags = (rmb->native_cc << 8) | (rmb->charset ? (rmb->charset - 1) * 2 : 0) | rmb->extra_flags;

		if (klass->image->dynamic)
			g_hash_table_insert (((MonoDynamicImage*)klass->image)->method_aux_hash, m, method_aux);

		mono_loader_unlock ();

		return m;
	} else if (!(m->flags & METHOD_ATTRIBUTE_ABSTRACT) &&
			   !(m->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
		/* regular method with a body: build the IL header */
		MonoMethodHeader *header;
		guint32 code_size;
		gint32 max_stack, i;
		gint32 num_locals = 0;
		gint32 num_clauses = 0;
		guint8 *code;

		if (rmb->ilgen) {
			code = mono_array_addr (rmb->ilgen->code, guint8, 0);
			code_size = rmb->ilgen->code_len;
			max_stack = rmb->ilgen->max_stack;
			num_locals = rmb->ilgen->locals ? mono_array_length (rmb->ilgen->locals) : 0;
			if (rmb->ilgen->ex_handlers)
				num_clauses = method_count_clauses (rmb->ilgen);
		} else {
			if (rmb->code) {
				code = mono_array_addr (rmb->code, guint8, 0);
				code_size = mono_array_length (rmb->code);
				/* we probably need to run a verifier on the code... */
				max_stack = 8;
			} else {
				code = NULL;
				code_size = 0;
				max_stack = 8;
			}
		}

		header = image_g_malloc0 (image, MONO_SIZEOF_METHOD_HEADER + num_locals * sizeof (MonoType*));
		header->code_size = code_size;
		/* copy the IL: the managed array backing it may move/be collected */
		header->code = image_g_malloc (image, code_size);
		memcpy ((char*)header->code, code, code_size);
		header->max_stack = max_stack;
		header->init_locals = rmb->init_locals;
		header->num_locals = num_locals;

		for (i = 0; i < num_locals; ++i) {
			MonoReflectionLocalBuilder *lb =
				mono_array_get (rmb->ilgen->locals, MonoReflectionLocalBuilder*, i);

			header->locals [i] = image_g_new0 (image, MonoType, 1);
			memcpy (header->locals [i], mono_reflection_type_get_handle ((MonoReflectionType*)lb->type), MONO_SIZEOF_TYPE);
		}

		header->num_clauses = num_clauses;
		if (num_clauses) {
			header->clauses = method_encode_clauses (image, (MonoDynamicImage*)klass->image,
				 rmb->ilgen, num_clauses);
		}

		wrapperm->header = header;
	}

	if (rmb->generic_params) {
		int count = mono_array_length (rmb->generic_params);
		MonoGenericContainer *container = rmb->generic_container;

		g_assert (container);

		container->type_argc = count;
		container->type_params = image_g_new0 (image, MonoGenericParamFull, count);
		container->owner.method = m;

		m->is_generic = TRUE;
		mono_method_set_generic_container (m, container);

		for (i = 0; i < count; i++) {
			MonoReflectionGenericParam *gp =
				mono_array_get (rmb->generic_params, MonoReflectionGenericParam*, i);
			MonoGenericParamFull *param = (MonoGenericParamFull *) mono_reflection_type_get_handle ((MonoReflectionType*)gp)->data.generic_param;
			container->type_params [i] = *param;
		}

		/*
		 * The method signature might have pointers to generic parameters that belong to other methods.
		 * This is a valid SRE case, but the resulting method signature must be encoded using the proper
		 * generic parameters.
		 */
		for (i = 0; i < m->signature->param_count; ++i) {
			MonoType *t = m->signature->params [i];
			if (t->type == MONO_TYPE_MVAR) {
				MonoGenericParam *gparam =  t->data.generic_param;
				if (gparam->num < count) {
					m->signature->params [i] = mono_metadata_type_dup (image, m->signature->params [i]);
					m->signature->params [i]->data.generic_param = mono_generic_container_get_param (container, gparam->num);
				}
			}
		}

		if (klass->generic_container) {
			container->parent = klass->generic_container;
			container->context.class_inst = klass->generic_container->context.class_inst;
		}
		container->context.method_inst = mono_get_shared_generic_inst (container);
	}

	if (rmb->refs) {
		/* dynamic method: stash the token-resolution table on the wrapper */
		MonoMethodWrapper *mw = (MonoMethodWrapper*)m;
		int i;
		void **data;

		m->wrapper_type = MONO_WRAPPER_DYNAMIC_METHOD;

		mw->method_data = data = image_g_new (image, gpointer, rmb->nrefs + 1);
		data [0] = GUINT_TO_POINTER (rmb->nrefs);
		for (i = 0; i < rmb->nrefs; ++i)
			data [i + 1] = rmb->refs [i];
	}

	method_aux = NULL;

	/* Parameter info */
	if (rmb->pinfo) {
		if (!method_aux)
			method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1);
		method_aux->param_names = image_g_new0 (image, char *, mono_method_signature (m)->param_count + 1);
		/* index 0 is the return value; parameters start at 1 */
		for (i = 0; i <= m->signature->param_count; ++i) {
			MonoReflectionParamBuilder *pb;
			if ((pb = mono_array_get (rmb->pinfo, MonoReflectionParamBuilder*, i))) {
				if ((i > 0) && (pb->attrs)) {
					/* Make a copy since it might point to a shared type structure */
					m->signature->params [i - 1] = mono_metadata_type_dup (klass->image, m->signature->params [i - 1]);
					m->signature->params [i - 1]->attrs = pb->attrs;
				}

				if (pb->attrs & PARAM_ATTRIBUTE_HAS_DEFAULT) {
					MonoDynamicImage *assembly;
					guint32 idx, def_type, len;
					char *p;
					const char *p2;

					if (!method_aux->param_defaults) {
						method_aux->param_defaults = image_g_new0 (image, guint8*, m->signature->param_count + 1);
						method_aux->param_default_types = image_g_new0 (image, guint32, m->signature->param_count + 1);
					}
					assembly = (MonoDynamicImage*)klass->image;
					idx = encode_constant (assembly, pb->def_value, &def_type);
					/* Copy the data from the blob since it might get realloc-ed */
					p = assembly->blob.data + idx;
					len = mono_metadata_decode_blob_size (p, &p2);
					len += p2 - p;
					method_aux->param_defaults [i] = image_g_malloc (image, len);
					method_aux->param_default_types [i] = def_type;
					memcpy ((gpointer)method_aux->param_defaults [i], p, len);
				}

				if (pb->name) {
					method_aux->param_names [i] = mono_string_to_utf8_image (image, pb->name, &error);
					g_assert (mono_error_ok (&error));
				}
				if (pb->cattrs) {
					if (!method_aux->param_cattr)
						method_aux->param_cattr = image_g_new0 (image, MonoCustomAttrInfo*, m->signature->param_count + 1);
					method_aux->param_cattr [i] = mono_custom_attrs_from_builders (image, klass->image, pb->cattrs);
				}
			}
		}
	}

	/* Parameter marshalling */
	specs = NULL;
	if (rmb->pinfo)		
		for (i = 0; i < mono_array_length (rmb->pinfo); ++i) {
			MonoReflectionParamBuilder *pb;
			if ((pb = mono_array_get (rmb->pinfo, MonoReflectionParamBuilder*, i))) {
				if (pb->marshal_info) {
					if (specs == NULL)
						specs = image_g_new0 (image, MonoMarshalSpec*, sig->param_count + 1);
					specs [pb->position] = mono_marshal_spec_from_builder (image, klass->image->assembly, pb->marshal_info);
				}
			}
		}
	if (specs != NULL) {
		if (!method_aux)
			method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1);
		method_aux->param_marshall = specs;
	}

	if (klass->image->dynamic && method_aux)
		g_hash_table_insert (((MonoDynamicImage*)klass->image)->method_aux_hash, m, method_aux);

	mono_loader_unlock ();

	return m;
}	

/*
 * ctorbuilder_to_mono_method:
 *
 * Materializes the runtime MonoMethod for a ConstructorBuilder, caches
 * it in mb->mhandle and registers the builder's custom attributes.
 */
static MonoMethod*
ctorbuilder_to_mono_method (MonoClass *klass, MonoReflectionCtorBuilder* mb)
{
	ReflectionMethodBuilder rmb;
	MonoMethodSignature *sig;

	mono_loader_lock ();
	sig = ctor_builder_to_signature (klass->image, mb);
	mono_loader_unlock ();

	reflection_methodbuilder_from_ctor_builder (&rmb, mb);

	mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig);
	mono_save_custom_attrs (klass->image, mb->mhandle, mb->cattrs);

	/* If we are in a generic class, we 
might be called multiple times from inflate_method */ if (!((MonoDynamicImage*)(MonoDynamicImage*)klass->image)->save && !klass->generic_container) { /* ilgen is no longer needed */ mb->ilgen = NULL; } return mb->mhandle; } static MonoMethod* methodbuilder_to_mono_method (MonoClass *klass, MonoReflectionMethodBuilder* mb) { ReflectionMethodBuilder rmb; MonoMethodSignature *sig; mono_loader_lock (); sig = method_builder_to_signature (klass->image, mb); mono_loader_unlock (); reflection_methodbuilder_from_method_builder (&rmb, mb); mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig); mono_save_custom_attrs (klass->image, mb->mhandle, mb->cattrs); /* If we are in a generic class, we might be called multiple times from inflate_method */ if (!((MonoDynamicImage*)(MonoDynamicImage*)klass->image)->save && !klass->generic_container) { /* ilgen is no longer needed */ mb->ilgen = NULL; } return mb->mhandle; } static MonoClassField* fieldbuilder_to_mono_class_field (MonoClass *klass, MonoReflectionFieldBuilder* fb) { MonoClassField *field; MonoType *custom; field = g_new0 (MonoClassField, 1); field->name = mono_string_to_utf8 (fb->name); if (fb->attrs || fb->modreq || fb->modopt) { field->type = mono_metadata_type_dup (NULL, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type)); field->type->attrs = fb->attrs; g_assert (klass->image->dynamic); custom = add_custom_modifiers ((MonoDynamicImage*)klass->image, field->type, fb->modreq, fb->modopt); g_free (field->type); field->type = custom; } else { field->type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type); } if (fb->offset != -1) field->offset = fb->offset; field->parent = klass; mono_save_custom_attrs (klass->image, field, fb->cattrs); // FIXME: Can't store fb->def_value/RVA, is it needed for field_on_insts ? 
	return field;
}
#endif

/*
 * mono_reflection_bind_generic_parameters:
 *
 *   Instantiate the generic type definition TYPE with the TYPE_ARGC type
 * arguments in TYPES and return the resulting MonoType, or NULL when TYPE
 * is not a generic type definition.
 */
MonoType*
mono_reflection_bind_generic_parameters (MonoReflectionType *type, int type_argc, MonoType **types)
{
	MonoClass *klass;
	MonoReflectionTypeBuilder *tb = NULL;
	gboolean is_dynamic = FALSE;
	MonoDomain *domain;
	MonoClass *geninst;

	mono_loader_lock ();

	domain = mono_object_domain (type);

	if (is_sre_type_builder (mono_object_class (type))) {
		tb = (MonoReflectionTypeBuilder *) type;

		is_dynamic = TRUE;
	} else if (is_sre_generic_instance (mono_object_class (type))) {
		/* An instantiation whose definition is itself a TypeBuilder is dynamic too */
		MonoReflectionGenericClass *rgi = (MonoReflectionGenericClass *) type;
		MonoReflectionType *gtd = rgi->generic_type;

		if (is_sre_type_builder (mono_object_class (gtd))) {
			tb = (MonoReflectionTypeBuilder *)gtd;

			is_dynamic = TRUE;
		}
	}

	/* FIXME: fix the CreateGenericParameters protocol to avoid the two stage setup of TypeBuilders */
	if (tb && tb->generic_container)
		mono_reflection_create_generic_class (tb);

	klass = mono_class_from_mono_type (mono_reflection_type_get_handle (type));
	if (!klass->generic_container) {
		mono_loader_unlock ();
		return NULL;
	}

	if (klass->wastypebuilder) {
		tb = (MonoReflectionTypeBuilder *) mono_class_get_ref_info (klass);

		is_dynamic = TRUE;
	}

	mono_loader_unlock ();

	geninst = mono_class_bind_generic_parameters (klass, type_argc, types, is_dynamic);

	return &geninst->byval_arg;
}

/*
 * mono_class_bind_generic_parameters:
 *
 *   Instantiate the generic type definition KLASS with TYPES, returning the
 * (canonicalized, cached) generic instance class.
 */
MonoClass*
mono_class_bind_generic_parameters (MonoClass *klass, int type_argc, MonoType **types, gboolean is_dynamic)
{
	MonoGenericClass *gclass;
	MonoGenericInst *inst;

	g_assert (klass->generic_container);

	inst = mono_metadata_get_generic_inst (type_argc, types);
	gclass = mono_metadata_lookup_generic_class (klass, inst, is_dynamic);

	return mono_generic_class_get_class (gclass);
}

/*
 * mono_reflection_bind_generic_method_parameters:
 *
 *   Implements MethodInfo.MakeGenericMethod: inflate RMETHOD with the type
 * arguments in TYPES.  Returns NULL when the argument count does not match
 * the method's generic parameter count; raises ArgumentException when the
 * instantiation fails verification.
 */
MonoReflectionMethod*
mono_reflection_bind_generic_method_parameters (MonoReflectionMethod *rmethod, MonoArray *types)
{
	MonoClass *klass;
	MonoMethod *method, *inflated;
	MonoMethodInflated *imethod;
	MonoGenericContext tmp_context;
	MonoGenericInst *ginst;
	MonoType **type_argv;
	int count, i;

	MONO_ARCH_SAVE_REGS;

	/*FIXME but this no longer should happen*/
	if (!strcmp (rmethod->object.vtable->klass->name, "MethodBuilder")) {
#ifndef DISABLE_REFLECTION_EMIT
		MonoReflectionMethodBuilder *mb = NULL;
		MonoReflectionTypeBuilder *tb;
		MonoClass *klass;

		mb = (MonoReflectionMethodBuilder *) rmethod;
		tb = (MonoReflectionTypeBuilder *) mb->type;
		klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));

		method = methodbuilder_to_mono_method (klass, mb);
#else
		g_assert_not_reached ();
		method = NULL;
#endif
	} else {
		method = rmethod->method;
	}

	klass = method->klass;

	/* Always inflate starting from the generic method definition */
	if (method->is_inflated)
		method = ((MonoMethodInflated *) method)->declaring;

	count = mono_method_signature (method)->generic_param_count;
	if (count != mono_array_length (types))
		return NULL;

	type_argv = g_new0 (MonoType *, count);
	for (i = 0; i < count; i++) {
		MonoReflectionType *garg = mono_array_get (types, gpointer, i);
		type_argv [i] = mono_reflection_type_get_handle (garg);
	}
	/* mono_metadata_get_generic_inst canonicalizes, so the temp array can be freed */
	ginst = mono_metadata_get_generic_inst (count, type_argv);
	g_free (type_argv);

	tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL;
	tmp_context.method_inst = ginst;

	inflated = mono_class_inflate_generic_method (method, &tmp_context);
	imethod = (MonoMethodInflated *) inflated;

	/*FIXME but I think this is no longer necessary*/
	if (method->klass->image->dynamic) {
		MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image;
		/*
		 * This table maps metadata structures representing inflated methods/fields
		 * to the reflection objects representing their generic definitions.
		 */
		mono_loader_lock ();
		mono_g_hash_table_insert (image->generic_def_objects, imethod, rmethod);
		mono_loader_unlock ();
	}

	if (!mono_verifier_is_method_valid_generic_instantiation (inflated))
		mono_raise_exception (mono_get_exception_argument ("typeArguments", "Invalid generic arguments"));

	return mono_method_get_object (mono_object_domain (rmethod), inflated, NULL);
}

#ifndef DISABLE_REFLECTION_EMIT

/*
 * inflate_mono_method:
 *
 *   Return the inflated form of METHOD for the generic instance KLASS,
 * reusing an already-created inflated method from klass->methods when
 * present.  OBJ is the reflection object used to key generic_def_objects.
 */
static MonoMethod *
inflate_mono_method (MonoClass *klass, MonoMethod *method, MonoObject *obj)
{
	MonoMethodInflated *imethod;
	MonoGenericContext *context;
	int i;

	/*
	 * With generic code sharing the klass might not be inflated.
	 * This can happen because classes inflated with their own
	 * type arguments are "normalized" to the uninflated class.
	 */
	if (!klass->generic_class)
		return method;

	context = mono_class_get_context (klass);

	if (klass->method.count && klass->methods) {
		/* Find the already created inflated method */
		for (i = 0; i < klass->method.count; ++i) {
			g_assert (klass->methods [i]->is_inflated);
			if (((MonoMethodInflated*)klass->methods [i])->declaring == method)
				break;
		}
		g_assert (i < klass->method.count);
		imethod = (MonoMethodInflated*)klass->methods [i];
	} else {
		imethod = (MonoMethodInflated *) mono_class_inflate_generic_method_full (method, klass, context);
	}

	if (method->is_generic && method->klass->image->dynamic) {
		MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image;

		mono_loader_lock ();
		mono_g_hash_table_insert (image->generic_def_objects, imethod, obj);
		mono_loader_unlock ();
	}
	return (MonoMethod *) imethod;
}

/*
 * inflate_method:
 *
 *   Given a reflection type TYPE (TypeBuilder, generic instance, or plain
 * runtime type) and a method-like reflection object OBJ, resolve OBJ to a
 * MonoMethod on the generic definition and inflate it for TYPE.
 */
static MonoMethod *
inflate_method (MonoReflectionType *type, MonoObject *obj)
{
	MonoMethod *method;
	MonoClass *gklass;

	MonoClass *type_class = mono_object_class (type);

	if (is_sre_generic_instance (type_class)) {
		MonoReflectionGenericClass *mgc = (MonoReflectionGenericClass*)type;
		gklass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)mgc->generic_type));
	} else if (is_sre_type_builder (type_class))
	{
		gklass = mono_class_from_mono_type (mono_reflection_type_get_handle (type));
	} else if (type->type) {
		/* Plain runtime type: inflate against its generic type definition */
		gklass = mono_class_from_mono_type (type->type);
		gklass = mono_class_get_generic_type_definition (gklass);
	} else {
		g_error ("Can't handle type %s", mono_type_get_full_name (mono_object_class (type)));
	}

	if (!strcmp (obj->vtable->klass->name, "MethodBuilder"))
		if (((MonoReflectionMethodBuilder*)obj)->mhandle)
			method = ((MonoReflectionMethodBuilder*)obj)->mhandle;
		else
			method = methodbuilder_to_mono_method (gklass, (MonoReflectionMethodBuilder *) obj);
	else if (!strcmp (obj->vtable->klass->name, "ConstructorBuilder"))
		method = ctorbuilder_to_mono_method (gklass, (MonoReflectionCtorBuilder *) obj);
	else if (!strcmp (obj->vtable->klass->name, "MonoMethod") || !strcmp (obj->vtable->klass->name, "MonoCMethod"))
		method = ((MonoReflectionMethod *) obj)->method;
	else {
		method = NULL; /* prevent compiler warning */
		g_error ("can't handle type %s", obj->vtable->klass->name);
	}

	return inflate_mono_method (mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)type)), method, obj);
}

/*TODO avoid saving custom attrs for generic classes as it's enough to have them on the generic type definition.*/
/*
 * mono_reflection_generic_class_initialize:
 *
 *   Populate the MonoDynamicGenericClass for a dynamic generic instance
 * with inflated methods, ctors and fields from the builder arrays.
 * Idempotent: returns early once dgclass->initialized is set.
 */
void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *methods, MonoArray *ctors, MonoArray *fields, MonoArray *properties, MonoArray *events)
{
	MonoGenericClass *gclass;
	MonoDynamicGenericClass *dgclass;
	MonoClass *klass, *gklass;
	MonoType *gtype;
	int i;

	MONO_ARCH_SAVE_REGS;

	gtype = mono_reflection_type_get_handle ((MonoReflectionType*)type);
	klass = mono_class_from_mono_type (gtype);
	g_assert (gtype->type == MONO_TYPE_GENERICINST);
	gclass = gtype->data.generic_class;

	if (!gclass->is_dynamic)
		return;

	dgclass = (MonoDynamicGenericClass *) gclass;

	if (dgclass->initialized)
		return;

	gklass = gclass->container_class;
	mono_class_init (gklass);

	dgclass->count_methods = methods ? mono_array_length (methods) : 0;
	dgclass->count_ctors = ctors ? mono_array_length (ctors) : 0;
	dgclass->count_fields = fields ? mono_array_length (fields) : 0;

	/* All arrays live in the image set that owns the generic class */
	dgclass->methods = mono_image_set_new0 (gclass->owner, MonoMethod *, dgclass->count_methods);
	dgclass->ctors = mono_image_set_new0 (gclass->owner, MonoMethod *, dgclass->count_ctors);
	dgclass->fields = mono_image_set_new0 (gclass->owner, MonoClassField, dgclass->count_fields);
	dgclass->field_objects = mono_image_set_new0 (gclass->owner, MonoObject*, dgclass->count_fields);
	dgclass->field_generic_types = mono_image_set_new0 (gclass->owner, MonoType*, dgclass->count_fields);

	for (i = 0; i < dgclass->count_methods; i++) {
		MonoObject *obj = mono_array_get (methods, gpointer, i);

		dgclass->methods [i] = inflate_method ((MonoReflectionType*)type, obj);
	}

	for (i = 0; i < dgclass->count_ctors; i++) {
		MonoObject *obj = mono_array_get (ctors, gpointer, i);

		dgclass->ctors [i] = inflate_method ((MonoReflectionType*)type, obj);
	}

	for (i = 0; i < dgclass->count_fields; i++) {
		MonoObject *obj = mono_array_get (fields, gpointer, i);
		MonoClassField *field, *inflated_field = NULL;

		if (!strcmp (obj->vtable->klass->name, "FieldBuilder"))
			inflated_field = field = fieldbuilder_to_mono_class_field (klass, (MonoReflectionFieldBuilder *) obj);
		else if (!strcmp (obj->vtable->klass->name, "MonoField"))
			field = ((MonoReflectionField *) obj)->field;
		else {
			field = NULL; /* prevent compiler warning */
			g_assert_not_reached ();
		}

		/* Copy by value, then re-point parent/type at the inflated class */
		dgclass->fields [i] = *field;
		dgclass->fields [i].parent = klass;
		dgclass->fields [i].type = mono_class_inflate_generic_type (
			field->type, mono_generic_class_get_context ((MonoGenericClass *) dgclass));
		dgclass->field_generic_types [i] = field->type;
		MOVING_GC_REGISTER (&dgclass->field_objects [i]);
		dgclass->field_objects [i] = obj;

		if (inflated_field) {
			/* fieldbuilder_to_mono_class_field result was copied above; free the temp */
			g_free (inflated_field);
		} else {
			/* Shared MonoField: duplicate the name into image-set memory */
			dgclass->fields [i].name = mono_image_set_strdup (gclass->owner, dgclass->fields [i].name);
		}
	}

	dgclass->initialized = TRUE;
}
/*
 * mono_reflection_free_dynamic_generic_class:
 *
 *   Release per-field resources of a dynamic generic class: the inflated
 * field types and (under SGen) the GC roots registered for field objects.
 */
void
mono_reflection_free_dynamic_generic_class (MonoGenericClass *gclass)
{
	MonoDynamicGenericClass *dgclass;
	int i;

	g_assert (gclass->is_dynamic);

	dgclass = (MonoDynamicGenericClass *)gclass;

	for (i = 0; i < dgclass->count_fields; ++i) {
		MonoClassField *field = dgclass->fields + i;
		mono_metadata_free_type (field->type);
#if HAVE_SGEN_GC
		MONO_GC_UNREGISTER_ROOT (dgclass->field_objects [i]);
#endif
	}
}

/*
 * fix_partial_generic_class:
 *
 *   Bring a generic instance KLASS in sync with its (possibly freshly
 * finished) generic type definition: parent, methods, interfaces and
 * fields.  No-ops for already-finished typebuilders.
 */
static void
fix_partial_generic_class (MonoClass *klass)
{
	MonoClass *gklass = klass->generic_class->container_class;
	MonoDynamicGenericClass *dgclass;
	int i;

	if (klass->wastypebuilder)
		return;

	dgclass = (MonoDynamicGenericClass *)  klass->generic_class;
	if (klass->parent != gklass->parent) {
		MonoError error;
		MonoType *parent_type = mono_class_inflate_generic_type_checked (&gklass->parent->byval_arg, &klass->generic_class->context, &error);
		if (mono_error_ok (&error)) {
			MonoClass *parent = mono_class_from_mono_type (parent_type);
			mono_metadata_free_type (parent_type);
			if (parent != klass->parent) {
				/*fool mono_class_setup_parent*/
				klass->supertypes = NULL;
				mono_class_setup_parent (klass, parent);
			}
		} else {
			mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL);
			mono_error_cleanup (&error);
			if (gklass->wastypebuilder)
				klass->wastypebuilder = TRUE;
			return;
		}
	}

	if (!dgclass->initialized)
		return;

	if (klass->method.count != gklass->method.count) {
		klass->method.count = gklass->method.count;
		klass->methods = mono_image_alloc (klass->image, sizeof (MonoMethod*) * (klass->method.count + 1));

		for (i = 0; i < klass->method.count; i++) {
			klass->methods [i] = mono_class_inflate_generic_method_full (
				gklass->methods [i], klass, mono_class_get_context (klass));
		}
	}

	if (klass->interface_count && klass->interface_count != gklass->interface_count) {
		klass->interface_count = gklass->interface_count;
		klass->interfaces = mono_image_alloc (klass->image, sizeof (MonoClass*) * gklass->interface_count);
		klass->interfaces_packed = NULL; /*make setup_interface_offsets happy*/

		for (i = 0; i < gklass->interface_count; ++i) {
			MonoType *iface_type = mono_class_inflate_generic_type (&gklass->interfaces [i]->byval_arg, mono_class_get_context (klass));
			klass->interfaces [i] = mono_class_from_mono_type (iface_type);
			mono_metadata_free_type (iface_type);

			ensure_runtime_vtable (klass->interfaces [i]);
		}
		klass->interfaces_inited = 1;
	}

	if (klass->field.count != gklass->field.count) {
		klass->field.count = gklass->field.count;
		klass->fields = image_g_new0 (klass->image, MonoClassField, klass->field.count);

		for (i = 0; i < klass->field.count; i++) {
			klass->fields [i] = gklass->fields [i];
			klass->fields [i].parent = klass;
			klass->fields [i].type = mono_class_inflate_generic_type (gklass->fields [i].type, mono_class_get_context (klass));
		}
	}

	/*We can only finish with this klass once it's parent has as well*/
	if (gklass->wastypebuilder)
		klass->wastypebuilder = TRUE;
	return;
}

/*
 * ensure_generic_class_runtime_vtable:
 *
 *   Finish the generic type definition first, then patch this instance.
 */
static void
ensure_generic_class_runtime_vtable (MonoClass *klass)
{
	MonoClass *gklass = klass->generic_class->container_class;

	ensure_runtime_vtable (gklass);

	fix_partial_generic_class (klass);
}

/*
 * ensure_runtime_vtable:
 *
 *   Materialize klass->methods/interfaces for a dynamic class so vtable
 * construction can proceed before the TypeBuilder is finished.
 */
static void
ensure_runtime_vtable (MonoClass *klass)
{
	MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
	int i, num, j;

	if (!klass->image->dynamic || (!tb && !klass->generic_class) || klass->wastypebuilder)
		return;
	if (klass->parent)
		ensure_runtime_vtable (klass->parent);

	if (tb) {
		num = tb->ctors? mono_array_length (tb->ctors): 0;
		num += tb->num_methods;
		klass->method.count = num;
		klass->methods = mono_image_alloc (klass->image, sizeof (MonoMethod*) * num);
		num = tb->ctors?
mono_array_length (tb->ctors): 0;
		/* Constructors occupy the first slots of klass->methods, then the methods */
		for (i = 0; i < num; ++i)
			klass->methods [i] = ctorbuilder_to_mono_method (klass, mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i));
		num = tb->num_methods;
		j = i;
		for (i = 0; i < num; ++i)
			klass->methods [j++] = methodbuilder_to_mono_method (klass, mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i));

		if (tb->interfaces) {
			klass->interface_count = mono_array_length (tb->interfaces);
			klass->interfaces = mono_image_alloc (klass->image, sizeof (MonoClass*) * klass->interface_count);
			for (i = 0; i < klass->interface_count; ++i) {
				MonoType *iface = mono_type_array_get_and_resolve (tb->interfaces, i);
				klass->interfaces [i] = mono_class_from_mono_type (iface);
				ensure_runtime_vtable (klass->interfaces [i]);
			}
			klass->interfaces_inited = 1;
		}
	} else if (klass->generic_class){
		ensure_generic_class_runtime_vtable (klass);
	}

	if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
		/* Interfaces: assign vtable slots to the non-static methods */
		int slot_num = 0;
		for (i = 0; i < klass->method.count; ++i) {
			MonoMethod *im = klass->methods [i];
			if (!(im->flags & METHOD_ATTRIBUTE_STATIC))
				im->slot = slot_num++;
		}

		klass->interfaces_packed = NULL; /*make setup_interface_offsets happy*/
		mono_class_setup_interface_offsets (klass);
		mono_class_setup_interface_id (klass);
	}

	/*
	 * The generic vtable is needed even if image->run is not set since some
	 * runtime code like ves_icall_Type_GetMethodsByName depends on
	 * method->slot being defined.
	 */

	/*
	 * tb->methods could not be freed since it is used for determining
	 * overrides during dynamic vtable construction.
	 */
}

/*
 * mono_reflection_method_get_handle:
 *
 *   Resolve a reflection method object (MonoMethod, MonoGenericMethod,
 * MethodBuilder or MethodOnTypeBuilderInst) to its runtime MonoMethod.
 * Aborts via g_error for unsupported object types.
 */
static MonoMethod*
mono_reflection_method_get_handle (MonoObject *method)
{
	MonoClass *class = mono_object_class (method);
	if (is_sr_mono_method (class) || is_sr_mono_generic_method (class)) {
		MonoReflectionMethod *sr_method = (MonoReflectionMethod*)method;
		return sr_method->method;
	}
	if (is_sre_method_builder (class)) {
		MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)method;
		return mb->mhandle;
	}
	if (is_sre_method_on_tb_inst (class)) {
		MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)method;
		MonoMethod *result;
		/*FIXME move this to a proper method and unify with resolve_object*/
		if (m->method_args) {
			result = mono_reflection_method_on_tb_inst_get_handle (m);
		} else {
			MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst);
			MonoClass *inflated_klass = mono_class_from_mono_type (type);
			MonoMethod *mono_method;

			if (is_sre_method_builder (mono_object_class (m->mb)))
				mono_method = ((MonoReflectionMethodBuilder *)m->mb)->mhandle;
			else if (is_sr_mono_method (mono_object_class (m->mb)))
				mono_method = ((MonoReflectionMethod *)m->mb)->method;
			else
				g_error ("resolve_object:: can't handle a MTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (m->mb)));

			result = inflate_mono_method (inflated_klass, mono_method, (MonoObject*)m->mb);
		}
		return result;
	}

	g_error ("Can't handle methods of type %s:%s", class->name_space, class->name);
	return NULL;
}

/*
 * mono_reflection_get_dynamic_overrides:
 *
 *   Collect the explicit method overrides declared on a TypeBuilder as a
 * flat array of [declaration, implementation] pairs.  *OVERRIDES is
 * g_new0-allocated (caller frees); *NUM_OVERRIDES is the pair count.
 */
void
mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides)
{
	MonoReflectionTypeBuilder *tb;
	int i, onum;

	*overrides = NULL;
	*num_overrides = 0;

	g_assert (klass->image->dynamic);

	if (!mono_class_get_ref_info (klass))
		return;

	g_assert (strcmp (((MonoObject*)mono_class_get_ref_info (klass))->vtable->klass->name, "TypeBuilder") == 0);

	tb = (MonoReflectionTypeBuilder*)mono_class_get_ref_info (klass);

	onum = 0;
	if (tb->methods) {
		for (i = 0; i < tb->num_methods; ++i) {
MonoReflectionMethodBuilder *mb = mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i);
			if (mb->override_method)
				onum ++;
		}
	}

	if (onum) {
		/* Pairs of (overridden declaration, implementing MonoMethod) */
		*overrides = g_new0 (MonoMethod*, onum * 2);

		onum = 0;
		for (i = 0; i < tb->num_methods; ++i) {
			MonoReflectionMethodBuilder *mb = mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i);
			if (mb->override_method) {
				(*overrides) [onum * 2] = mono_reflection_method_get_handle ((MonoObject *)mb->override_method);
				(*overrides) [onum * 2 + 1] = mb->mhandle;

				g_assert (mb->mhandle);

				onum ++;
			}
		}
	}

	*num_overrides = onum;
}

/*
 * typebuilder_setup_fields:
 *
 *   Build klass->fields (and default-value / RVA data) from the
 * TypeBuilder's FieldBuilder array, then lay out the instance.  On string
 * conversion failure, ERROR is set and the function returns early.
 */
static void
typebuilder_setup_fields (MonoClass *klass, MonoError *error)
{
	MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
	MonoReflectionFieldBuilder *fb;
	MonoClassField *field;
	MonoImage *image = klass->image;
	const char *p, *p2;
	int i;
	guint32 len, idx, real_size = 0;

	klass->field.count = tb->num_fields;
	klass->field.first = 0;

	mono_error_init (error);

	if (tb->class_size) {
		/* Only the low 4 bits of packing_size are meaningful */
		g_assert ((tb->packing_size & 0xfffffff0) == 0);
		klass->packing_size = tb->packing_size;
		real_size = klass->instance_size + tb->class_size;
	}

	if (!klass->field.count) {
		klass->instance_size = MAX (klass->instance_size, real_size);
		return;
	}

	klass->fields = image_g_new0 (image, MonoClassField, klass->field.count);
	mono_class_alloc_ext (klass);
	klass->ext->field_def_values = image_g_new0 (image, MonoFieldDefaultValue, klass->field.count);
	/*
	This is, guess what, a hack.
	The issue is that the runtime doesn't know how to setup the fields of a typebuider and crash.
	On the static path no field class is resolved, only types are built. This is the right thing to do
	but we suck.
	Setting size_inited is harmless because we're doing the same job as mono_class_setup_fields anyway.
	*/
	klass->size_inited = 1;

	for (i = 0; i < klass->field.count; ++i) {
		fb = mono_array_get (tb->fields, gpointer, i);
		field = &klass->fields [i];
		field->name = mono_string_to_utf8_image (image, fb->name, error);
		if (!mono_error_ok (error))
			return;
		if (fb->attrs) {
			field->type = mono_metadata_type_dup (klass->image, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type));
			field->type->attrs = fb->attrs;
		} else {
			field->type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
		}
		if ((fb->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) && fb->rva_data)
			klass->ext->field_def_values [i].data = mono_array_addr (fb->rva_data, char, 0);
		if (fb->offset != -1)
			field->offset = fb->offset;
		field->parent = klass;
		fb->handle = field;
		mono_save_custom_attrs (klass->image, field, fb->cattrs);

		if (klass->enumtype && !(field->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
			/* The single instance field of an enum defines its underlying type */
			klass->cast_class = klass->element_class = mono_class_from_mono_type (field->type);
		}
		if (fb->def_value) {
			MonoDynamicImage *assembly = (MonoDynamicImage*)klass->image;
			field->type->attrs |= FIELD_ATTRIBUTE_HAS_DEFAULT;
			idx = encode_constant (assembly, fb->def_value, &klass->ext->field_def_values [i].def_type);
			/* Copy the data from the blob since it might get realloc-ed */
			p = assembly->blob.data + idx;
			len = mono_metadata_decode_blob_size (p, &p2);
			len += p2 - p;
			klass->ext->field_def_values [i].data = mono_image_alloc (image, len);
			memcpy ((gpointer)klass->ext->field_def_values [i].data, p, len);
		}
	}

	klass->instance_size = MAX (klass->instance_size, real_size);
	mono_class_layout_fields (klass);
}

/*
 * typebuilder_setup_properties:
 *
 *   Build klass->ext->properties from the TypeBuilder's PropertyBuilder
 * array, including default-value blobs.  ERROR reports string conversion
 * failures.
 */
static void
typebuilder_setup_properties (MonoClass *klass, MonoError *error)
{
	MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
	MonoReflectionPropertyBuilder *pb;
	MonoImage *image = klass->image;
	MonoProperty *properties;
	int i;

	mono_error_init (error);

	if (!klass->ext)
		klass->ext = image_g_new0 (image, MonoClassExt, 1);

	klass->ext->property.count = tb->properties ?
mono_array_length (tb->properties) : 0;
	klass->ext->property.first = 0;

	properties = image_g_new0 (image, MonoProperty, klass->ext->property.count);
	klass->ext->properties = properties;
	for (i = 0; i < klass->ext->property.count; ++i) {
		pb = mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i);
		properties [i].parent = klass;
		properties [i].attrs = pb->attrs;
		properties [i].name = mono_string_to_utf8_image (image, pb->name, error);
		if (!mono_error_ok (error))
			return;
		if (pb->get_method)
			properties [i].get = pb->get_method->mhandle;
		if (pb->set_method)
			properties [i].set = pb->set_method->mhandle;

		mono_save_custom_attrs (klass->image, &properties [i], pb->cattrs);
		if (pb->def_value) {
			guint32 len, idx;
			const char *p, *p2;
			MonoDynamicImage *assembly = (MonoDynamicImage*)klass->image;
			if (!klass->ext->prop_def_values)
				klass->ext->prop_def_values = image_g_new0 (image, MonoFieldDefaultValue, klass->ext->property.count);
			properties [i].attrs |= PROPERTY_ATTRIBUTE_HAS_DEFAULT;
			idx = encode_constant (assembly, pb->def_value, &klass->ext->prop_def_values [i].def_type);
			/* Copy the data from the blob since it might get realloc-ed */
			p = assembly->blob.data + idx;
			len = mono_metadata_decode_blob_size (p, &p2);
			len += p2 - p;
			klass->ext->prop_def_values [i].data = mono_image_alloc (image, len);
			memcpy ((gpointer)klass->ext->prop_def_values [i].data, p, len);
		}
	}
}

/*
 * mono_reflection_event_builder_get_event_info:
 *
 *   Create a managed EventInfo for an EventBuilder on TB.
 * NOTE(review): the MonoEvent and its 'other' array are g_new0-allocated
 * and not obviously freed here — presumably owned by the returned object's
 * lifetime; confirm against callers.
 */
MonoReflectionEvent *
mono_reflection_event_builder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb)
{
	MonoEvent *event = g_new0 (MonoEvent, 1);
	MonoClass *klass;

	klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));

	event->parent = klass;
	event->attrs = eb->attrs;
	event->name = mono_string_to_utf8 (eb->name);
	if (eb->add_method)
		event->add = eb->add_method->mhandle;
	if (eb->remove_method)
		event->remove = eb->remove_method->mhandle;
	if (eb->raise_method)
		event->raise = eb->raise_method->mhandle;

#ifndef MONO_SMALL_CONFIG
	if (eb->other_methods) {
		int j;
		event->other = g_new0 (MonoMethod*, mono_array_length (eb->other_methods) + 1);
		for (j = 0; j < mono_array_length (eb->other_methods); ++j) {
			MonoReflectionMethodBuilder *mb =
				mono_array_get (eb->other_methods,
						MonoReflectionMethodBuilder*, j);
			event->other [j] = mb->mhandle;
		}
	}
#endif

	return mono_event_get_object (mono_object_domain (tb), klass, event);
}

/*
 * typebuilder_setup_events:
 *
 *   Build klass->ext->events from the TypeBuilder's EventBuilder array.
 * ERROR reports string conversion failures.
 */
static void
typebuilder_setup_events (MonoClass *klass, MonoError *error)
{
	MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
	MonoReflectionEventBuilder *eb;
	MonoImage *image = klass->image;
	MonoEvent *events;
	int i;

	mono_error_init (error);

	if (!klass->ext)
		klass->ext = image_g_new0 (image, MonoClassExt, 1);

	klass->ext->event.count = tb->events ? mono_array_length (tb->events) : 0;
	klass->ext->event.first = 0;

	events = image_g_new0 (image, MonoEvent, klass->ext->event.count);
	klass->ext->events = events;
	for (i = 0; i < klass->ext->event.count; ++i) {
		eb = mono_array_get (tb->events, MonoReflectionEventBuilder*, i);
		events [i].parent = klass;
		events [i].attrs = eb->attrs;
		events [i].name = mono_string_to_utf8_image (image, eb->name, error);
		if (!mono_error_ok (error))
			return;
		if (eb->add_method)
			events [i].add = eb->add_method->mhandle;
		if (eb->remove_method)
			events [i].remove = eb->remove_method->mhandle;
		if (eb->raise_method)
			events [i].raise = eb->raise_method->mhandle;

#ifndef MONO_SMALL_CONFIG
		if (eb->other_methods) {
			int j;
			events [i].other = image_g_new0 (image, MonoMethod*, mono_array_length (eb->other_methods) + 1);
			for (j = 0; j < mono_array_length (eb->other_methods); ++j) {
				MonoReflectionMethodBuilder *mb =
					mono_array_get (eb->other_methods,
							MonoReflectionMethodBuilder*, j);
				events [i].other [j] = mb->mhandle;
			}
		}
#endif
		mono_save_custom_attrs (klass->image, &events [i], eb->cattrs);
	}
}

/*
 * remove_instantiations_of_and_ensure_contents:
 *
 *   GHRFunc for domain->type_hash: returns TRUE (remove) for cached
 * instantiations of the just-created class, after fixing them up so they
 * remain usable.
 */
static gboolean
remove_instantiations_of_and_ensure_contents (gpointer key,
						  gpointer value,
						  gpointer user_data)
{
	MonoType *type = (MonoType*)key;
	MonoClass *klass =
(MonoClass*)user_data;

	if ((type->type == MONO_TYPE_GENERICINST) && (type->data.generic_class->container_class == klass)) {
		fix_partial_generic_class (mono_class_from_mono_type (type)); //Ensure it's safe to use it.
		return TRUE;
	} else
		return FALSE;
}

/* Run the user-type resolution hook over every element of ARR (NULL-safe). */
static void
check_array_for_usertypes (MonoArray *arr)
{
	int i;

	if (!arr)
		return;

	for (i = 0; i < mono_array_length (arr); ++i)
		RESOLVE_ARRAY_TYPE_ELEMENT (arr, i);
}

/*
 * mono_reflection_create_runtime_class:
 *
 *   Implements TypeBuilder.CreateType: finish the dynamic class (parent,
 * vtable, fields, properties, events), flush stale cached instantiations,
 * and return the resulting System.Type object.  Raises TypeLoadException
 * on setup failure or invalid enums.
 */
MonoReflectionType*
mono_reflection_create_runtime_class (MonoReflectionTypeBuilder *tb)
{
	MonoError error;
	MonoClass *klass;
	MonoDomain* domain;
	MonoReflectionType* res;
	int i, j;

	MONO_ARCH_SAVE_REGS;

	domain = mono_object_domain (tb);
	klass = mono_class_from_mono_type (tb->type.type);

	/*
	 * Check for user defined Type subclasses.
	 */
	RESOLVE_TYPE (tb->parent);
	check_array_for_usertypes (tb->interfaces);
	if (tb->fields) {
		for (i = 0; i < mono_array_length (tb->fields); ++i) {
			MonoReflectionFieldBuilder *fb = mono_array_get (tb->fields, gpointer, i);
			if (fb) {
				RESOLVE_TYPE (fb->type);
				check_array_for_usertypes (fb->modreq);
				check_array_for_usertypes (fb->modopt);
				if (fb->marshal_info && fb->marshal_info->marshaltyperef)
					RESOLVE_TYPE (fb->marshal_info->marshaltyperef);
			}
		}
	}
	if (tb->methods) {
		for (i = 0; i < mono_array_length (tb->methods); ++i) {
			MonoReflectionMethodBuilder *mb = mono_array_get (tb->methods, gpointer, i);
			if (mb) {
				RESOLVE_TYPE (mb->rtype);
				check_array_for_usertypes (mb->return_modreq);
				check_array_for_usertypes (mb->return_modopt);
				check_array_for_usertypes (mb->parameters);
				if (mb->param_modreq)
					for (j = 0; j < mono_array_length (mb->param_modreq); ++j)
						check_array_for_usertypes (mono_array_get (mb->param_modreq, MonoArray*, j));
				if (mb->param_modopt)
					for (j = 0; j < mono_array_length (mb->param_modopt); ++j)
						check_array_for_usertypes (mono_array_get (mb->param_modopt, MonoArray*, j));
			}
		}
	}
	if (tb->ctors) {
		for (i = 0; i < mono_array_length (tb->ctors); ++i) {
			MonoReflectionCtorBuilder *mb = mono_array_get (tb->ctors, gpointer, i);
			if (mb) {
				check_array_for_usertypes (mb->parameters);
				if (mb->param_modreq)
					for (j = 0; j < mono_array_length (mb->param_modreq); ++j)
						check_array_for_usertypes (mono_array_get (mb->param_modreq, MonoArray*, j));
				if (mb->param_modopt)
					for (j = 0; j < mono_array_length (mb->param_modopt); ++j)
						check_array_for_usertypes (mono_array_get (mb->param_modopt, MonoArray*, j));
			}
		}
	}

	mono_save_custom_attrs (klass->image, klass, tb->cattrs);

	/*
	 * we need to lock the domain because the lock will be taken inside
	 * So, we need to keep the locking order correct.
	 */
	mono_loader_lock ();
	mono_domain_lock (domain);
	if (klass->wastypebuilder) {
		/* Already finished (e.g. re-entrant CreateType) — nothing to do */
		mono_domain_unlock (domain);
		mono_loader_unlock ();
		return mono_type_get_object (mono_object_domain (tb), &klass->byval_arg);
	}
	/*
	 * Fields to set in klass:
	 * the various flags: delegate/unicode/contextbound etc.
	 */
	klass->flags = tb->attrs;
	klass->has_cctor = 1;
	klass->has_finalize = 1;

	/* fool mono_class_setup_parent */
	klass->supertypes = NULL;
	mono_class_setup_parent (klass, klass->parent);
	mono_class_setup_mono_type (klass);

#if 0
	if (!((MonoDynamicImage*)klass->image)->run) {
		if (klass->generic_container) {
			/* FIXME: The code below can't handle generic classes */
			klass->wastypebuilder = TRUE;
			mono_loader_unlock ();
			mono_domain_unlock (domain);
			return mono_type_get_object (mono_object_domain (tb), &klass->byval_arg);
		}
	}
#endif

	/* enums are done right away */
	if (!klass->enumtype)
		ensure_runtime_vtable (klass);

	if (tb->subtypes) {
		for (i = 0; i < mono_array_length (tb->subtypes); ++i) {
			MonoReflectionTypeBuilder *subtb = mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i);
			mono_class_alloc_ext (klass);
			klass->ext->nested_classes = g_list_prepend_image (klass->image, klass->ext->nested_classes, mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)subtb)));
		}
	}

	klass->nested_classes_inited = TRUE;

	/* fields and object layout */
	if (klass->parent) {
		if (!klass->parent->size_inited)
			mono_class_init (klass->parent);
		klass->instance_size = klass->parent->instance_size;
		klass->sizes.class_size = 0;
		klass->min_align = klass->parent->min_align;
		/* if the type has no fields we won't call the field_setup
		 * routine which sets up klass->has_references.
		 */
		klass->has_references |= klass->parent->has_references;
	} else {
		klass->instance_size = sizeof (MonoObject);
		klass->min_align = 1;
	}

	/* FIXME: handle packing_size and instance_size */
	typebuilder_setup_fields (klass, &error);
	if (!mono_error_ok (&error))
		goto failure;
	typebuilder_setup_properties (klass, &error);
	if (!mono_error_ok (&error))
		goto failure;

	typebuilder_setup_events (klass, &error);
	if (!mono_error_ok (&error))
		goto failure;

	klass->wastypebuilder = TRUE;

	/*
	 * If we are a generic TypeBuilder, there might be instantiations in the type cache
	 * which have type System.Reflection.MonoGenericClass, but after the type is created,
	 * we want to return normal System.MonoType objects, so clear these out from the cache.
	 *
	 * Together with this we must ensure the contents of all instances to match the created type.
	 */
	if (domain->type_hash && klass->generic_container)
		mono_g_hash_table_foreach_remove (domain->type_hash, remove_instantiations_of_and_ensure_contents, klass);

	mono_domain_unlock (domain);
	mono_loader_unlock ();

	if (klass->enumtype && !mono_class_is_valid_enum (klass)) {
		mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL);
		mono_raise_exception (mono_get_exception_type_load (tb->name, NULL));
	}

	res = mono_type_get_object (mono_object_domain (tb), &klass->byval_arg);
	g_assert (res != (MonoReflectionType*)tb);

	return res;

failure:
	mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL);
	klass->wastypebuilder = TRUE;
	mono_domain_unlock (domain);
	mono_loader_unlock ();
	mono_error_raise_exception (&error);
	return NULL;
}

/*
 * mono_reflection_initialize_generic_parameter:
 *
 *   Create the runtime MonoGenericParamFull / MonoClass for a
 * GenericTypeParameterBuilder and store the handle back on the builder.
 */
void
mono_reflection_initialize_generic_parameter (MonoReflectionGenericParam *gparam)
{
	MonoGenericParamFull *param;
	MonoImage *image;
	MonoClass *pklass;

	MONO_ARCH_SAVE_REGS;

	param = g_new0 (MonoGenericParamFull, 1);

	if (gparam->mbuilder) {
		if (!gparam->mbuilder->generic_container) {
			MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)gparam->mbuilder->type;
			MonoClass *klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
			gparam->mbuilder->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer));
			gparam->mbuilder->generic_container->is_method = TRUE;
			/*
			 * Cannot set owner.method, since the MonoMethod is not created yet.
			 * Set the image field instead, so type_in_image () works.
*/ gparam->mbuilder->generic_container->image = klass->image; } param->param.owner = gparam->mbuilder->generic_container; } else if (gparam->tbuilder) { if (!gparam->tbuilder->generic_container) { MonoClass *klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)gparam->tbuilder)); gparam->tbuilder->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer)); gparam->tbuilder->generic_container->owner.klass = klass; } param->param.owner = gparam->tbuilder->generic_container; } param->info.name = mono_string_to_utf8 (gparam->name); param->param.num = gparam->index; image = &gparam->tbuilder->module->dynamic_image->image; pklass = mono_class_from_generic_parameter ((MonoGenericParam *) param, image, gparam->mbuilder != NULL); gparam->type.type = &pklass->byval_arg; mono_class_set_ref_info (pklass, gparam); mono_image_lock (image); image->reflection_info_unregister_classes = g_slist_prepend (image->reflection_info_unregister_classes, pklass); mono_image_unlock (image); } MonoArray * mono_reflection_sighelper_get_signature_local (MonoReflectionSigHelper *sig) { MonoReflectionModuleBuilder *module = sig->module; MonoDynamicImage *assembly = module != NULL ? module->dynamic_image : NULL; guint32 na = sig->arguments ? 
mono_array_length (sig->arguments) : 0; guint32 buflen, i; MonoArray *result; SigBuffer buf; check_array_for_usertypes (sig->arguments); sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0x07); sigbuffer_add_value (&buf, na); if (assembly != NULL){ for (i = 0; i < na; ++i) { MonoReflectionType *type = mono_array_get (sig->arguments, MonoReflectionType*, i); encode_reflection_type (assembly, type, &buf); } } buflen = buf.p - buf.buf; result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen); memcpy (mono_array_addr (result, char, 0), buf.buf, buflen); sigbuffer_free (&buf); return result; } MonoArray * mono_reflection_sighelper_get_signature_field (MonoReflectionSigHelper *sig) { MonoDynamicImage *assembly = sig->module->dynamic_image; guint32 na = sig->arguments ? mono_array_length (sig->arguments) : 0; guint32 buflen, i; MonoArray *result; SigBuffer buf; check_array_for_usertypes (sig->arguments); sigbuffer_init (&buf, 32); sigbuffer_add_value (&buf, 0x06); for (i = 0; i < na; ++i) { MonoReflectionType *type = mono_array_get (sig->arguments, MonoReflectionType*, i); encode_reflection_type (assembly, type, &buf); } buflen = buf.p - buf.buf; result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen); memcpy (mono_array_addr (result, char, 0), buf.buf, buflen); sigbuffer_free (&buf); return result; } void mono_reflection_create_dynamic_method (MonoReflectionDynamicMethod *mb) { ReflectionMethodBuilder rmb; MonoMethodSignature *sig; MonoClass *klass; GSList *l; int i; sig = dynamic_method_to_signature (mb); reflection_methodbuilder_from_dynamic_method (&rmb, mb); /* * Resolve references. */ /* * Every second entry in the refs array is reserved for storing handle_class, * which is needed by the ldtoken implementation in the JIT. 
	 */
	rmb.nrefs = mb->nrefs;
	rmb.refs = g_new0 (gpointer, mb->nrefs + 1);
	for (i = 0; i < mb->nrefs; i += 2) {
		MonoClass *handle_class;
		gpointer ref;
		MonoObject *obj = mono_array_get (mb->refs, MonoObject*, i);

		if (strcmp (obj->vtable->klass->name, "DynamicMethod") == 0) {
			MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)obj;
			/*
			 * The referenced DynamicMethod should already be created by the managed
			 * code, except in the case of circular references. In that case, we store
			 * method in the refs array, and fix it up later when the referenced
			 * DynamicMethod is created.
			 */
			if (method->mhandle) {
				ref = method->mhandle;
			} else {
				/* FIXME: GC object stored in unmanaged memory */
				ref = method;

				/* FIXME: GC object stored in unmanaged memory */
				method->referenced_by = g_slist_append (method->referenced_by, mb);
			}
			handle_class = mono_defaults.methodhandle_class;
		} else {
			MonoException *ex = NULL;
			/* resolve_object () maps the builder/reflection object to the runtime
			 * structure the JIT needs for ldtoken */
			ref = resolve_object (mb->module->image, obj, &handle_class, NULL);
			if (!ref)
				ex = mono_get_exception_type_load (NULL, NULL);
			else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
				ex = mono_security_core_clr_ensure_dynamic_method_resolved_object (ref, handle_class);

			if (ex) {
				g_free (rmb.refs);
				mono_raise_exception (ex);
				return;
			}
		}

		rmb.refs [i] = ref; /* FIXME: GC object stored in unmanaged memory (change also resolve_object() signature) */
		rmb.refs [i + 1] = handle_class;
	}

	klass = mb->owner ?
		mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)mb->owner)) : mono_defaults.object_class;

	mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig);

	/* Fix up refs entries pointing at us */
	for (l = mb->referenced_by; l; l = l->next) {
		MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)l->data;
		MonoMethodWrapper *wrapper = (MonoMethodWrapper*)method->mhandle;
		gpointer *data;

		g_assert (method->mhandle);

		/* data [0] holds the entry count; entries are (token, handle_class) pairs */
		data = (gpointer*)wrapper->method_data;
		for (i = 0; i < GPOINTER_TO_UINT (data [0]); i += 2) {
			if ((data [i + 1] == mb) && (data [i + 1 + 1] == mono_defaults.methodhandle_class))
				data [i + 1] = mb->mhandle;
		}
	}
	g_slist_free (mb->referenced_by);

	g_free (rmb.refs);

	/* ilgen is no longer needed */
	mb->ilgen = NULL;
}

#endif /* DISABLE_REFLECTION_EMIT */

/* Free the runtime method created for the DynamicMethod wrapper MB, if any. */
void
mono_reflection_destroy_dynamic_method (MonoReflectionDynamicMethod *mb)
{
	g_assert (mb);

	if (mb->mhandle)
		mono_runtime_free_method (
			mono_object_get_domain ((MonoObject*)mb), mb->mhandle);
}

/**
 * mono_reflection_is_valid_dynamic_token:
 *
 * Returns TRUE if token is valid, i.e. registered in the dynamic image's
 * token->object mapping table.
 */
gboolean
mono_reflection_is_valid_dynamic_token (MonoDynamicImage *image, guint32 token)
{
	return mono_g_hash_table_lookup (image->tokens, GUINT_TO_POINTER (token)) != NULL;
}

/*
 * Look up the vararg call-site signature recorded for TOKEN in the dynamic
 * image, falling back to the method's own signature when none was recorded.
 */
MonoMethodSignature *
mono_reflection_lookup_signature (MonoImage *image, MonoMethod *method, guint32 token)
{
	MonoMethodSignature *sig;
	g_assert (image->dynamic);

	sig = g_hash_table_lookup (((MonoDynamicImage*)image)->vararg_aux_hash, GUINT_TO_POINTER (token));
	if (sig)
		return sig;

	return mono_method_signature (method);
}

#ifndef DISABLE_REFLECTION_EMIT

/**
 * mono_reflection_lookup_dynamic_token:
 *
 * Finish the Builder object pointed to by TOKEN and return the corresponding
 * runtime structure. If HANDLE_CLASS is not NULL, it is set to the class required by
 * mono_ldtoken. If valid_token is TRUE, assert if it is not found in the token->object
 * mapping table.
*
 * LOCKING: Take the loader lock
 */
gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context)
{
	MonoDynamicImage *assembly = (MonoDynamicImage*)image;
	MonoObject *obj;
	MonoClass *klass;

	mono_loader_lock ();
	obj = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
	mono_loader_unlock ();
	if (!obj) {
		if (valid_token)
			g_error ("Could not find required dynamic token 0x%08x", token);
		else
			return NULL;
	}

	if (!handle_class)
		handle_class = &klass;
	return resolve_object (image, obj, handle_class, context);
}

/*
 * ensure_complete_type:
 *
 * Ensure that KLASS is completed if it is a dynamic type, or references
 * dynamic types.
 */
static void
ensure_complete_type (MonoClass *klass)
{
	if (klass->image->dynamic && !klass->wastypebuilder && mono_class_get_ref_info (klass)) {
		MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);

		/* Ask the managed side to run TypeBuilder.CreateType () */
		mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);

		// Asserting here could break a lot of code
		//g_assert (klass->wastypebuilder);
	}

	if (klass->generic_class) {
		MonoGenericInst *inst = klass->generic_class->context.class_inst;
		int i;

		/* Recursively complete every type argument of a generic instance */
		for (i = 0; i < inst->type_argc; ++i) {
			ensure_complete_type (mono_class_from_mono_type (inst->type_argv [i]));
		}
	}
}

/*
 * resolve_object:
 *
 * Map the reflection/builder object OBJ to the runtime structure required by
 * the JIT's ldtoken implementation, setting *HANDLE_CLASS to the matching
 * handle class and inflating through CONTEXT when one is supplied.  Dispatch
 * is by the object's managed class name.
 */
static gpointer
resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context)
{
	gpointer result = NULL;

	if (strcmp (obj->vtable->klass->name, "String") == 0) {
		result = mono_string_intern ((MonoString*)obj);
		*handle_class = mono_defaults.string_class;
		g_assert (result);
	} else if (strcmp (obj->vtable->klass->name, "MonoType") == 0) {
		MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
		MonoClass *mc = mono_class_from_mono_type (type);
		if (!mono_class_init (mc))
			mono_raise_exception (mono_class_get_exception_for_failure (mc));

		if (context) {
			MonoType *inflated = mono_class_inflate_generic_type (type, context);
			result = mono_class_from_mono_type (inflated);
			mono_metadata_free_type (inflated);
		} else {
			result = mono_class_from_mono_type (type);
		}
		*handle_class = mono_defaults.typehandle_class;
		g_assert (result);
	} else if (strcmp (obj->vtable->klass->name, "MonoMethod") == 0 ||
		   strcmp (obj->vtable->klass->name, "MonoCMethod") == 0 ||
		   strcmp (obj->vtable->klass->name, "MonoGenericCMethod") == 0 ||
		   strcmp (obj->vtable->klass->name, "MonoGenericMethod") == 0) {
		result = ((MonoReflectionMethod*)obj)->method;
		if (context)
			result = mono_class_inflate_generic_method (result, context);
		*handle_class = mono_defaults.methodhandle_class;
		g_assert (result);
	} else if (strcmp (obj->vtable->klass->name, "MethodBuilder") == 0) {
		MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)obj;
		result = mb->mhandle;
		if (!result) {
			/* Type is not yet created */
			MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;

			mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);

			/*
			 * Hopefully this has been filled in by calling CreateType() on the
			 * TypeBuilder.
			 */
			/*
			 * TODO: This won't work if the application finishes another
			 * TypeBuilder instance instead of this one.
			 */
			result = mb->mhandle;
		}
		if (context)
			result = mono_class_inflate_generic_method (result, context);
		*handle_class = mono_defaults.methodhandle_class;
	} else if (strcmp (obj->vtable->klass->name, "ConstructorBuilder") == 0) {
		MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder*)obj;

		result = cb->mhandle;
		if (!result) {
			MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)cb->type;

			mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
			result = cb->mhandle;
		}
		if (context)
			result = mono_class_inflate_generic_method (result, context);
		*handle_class = mono_defaults.methodhandle_class;
	} else if (strcmp (obj->vtable->klass->name, "MonoField") == 0) {
		MonoClassField *field = ((MonoReflectionField*)obj)->field;

		ensure_complete_type (field->parent);

		if (context) {
			/* Find the field with the same name on the inflated parent class */
			MonoType *inflated = mono_class_inflate_generic_type (&field->parent->byval_arg, context);
			MonoClass *class = mono_class_from_mono_type (inflated);
			MonoClassField *inflated_field;
			gpointer iter = NULL;
			mono_metadata_free_type (inflated);
			while ((inflated_field = mono_class_get_fields (class, &iter))) {
				if (!strcmp (field->name, inflated_field->name))
					break;
			}
			g_assert (inflated_field && !strcmp (field->name, inflated_field->name));
			result = inflated_field;
		} else {
			result = field;
		}
		*handle_class = mono_defaults.fieldhandle_class;
		g_assert (result);
	} else if (strcmp (obj->vtable->klass->name, "FieldBuilder") == 0) {
		MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder*)obj;
		result = fb->handle;

		if (!result) {
			MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)fb->typeb;

			mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
			result = fb->handle;
		}

		if (fb->handle && fb->handle->parent->generic_container) {
			MonoClass *klass = fb->handle->parent;
			MonoType *type = mono_class_inflate_generic_type (&klass->byval_arg, context);
			MonoClass *inflated = mono_class_from_mono_type (type);

			result = mono_class_get_field_from_name (inflated, mono_field_get_name (fb->handle));
			g_assert (result);
			mono_metadata_free_type (type);
		}
		*handle_class = mono_defaults.fieldhandle_class;
	} else if (strcmp (obj->vtable->klass->name, "TypeBuilder") == 0) {
		MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)obj;
		MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)tb);
		MonoClass *klass;

		klass = type->data.klass;
		if (klass->wastypebuilder) {
			/* Already created */
			result = klass;
		}
		else {
			mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
			result = type->data.klass;
			g_assert (result);
		}
		*handle_class = mono_defaults.typehandle_class;
	} else if (strcmp (obj->vtable->klass->name, "SignatureHelper") == 0) {
		MonoReflectionSigHelper *helper = (MonoReflectionSigHelper*)obj;
		MonoMethodSignature *sig;
		int nargs, i;

		if (helper->arguments)
			nargs = mono_array_length (helper->arguments);
		else
			nargs = 0;

		sig = mono_metadata_signature_alloc (image, nargs);
		sig->explicit_this = helper->call_conv & 64 ? 1 : 0;
		sig->hasthis = helper->call_conv & 32 ? 1 : 0;

		if (helper->unmanaged_call_conv) { /* unmanaged */
			sig->call_convention = helper->unmanaged_call_conv - 1;
			sig->pinvoke = TRUE;
		} else if (helper->call_conv & 0x02) {
			sig->call_convention = MONO_CALL_VARARG;
		} else {
			sig->call_convention = MONO_CALL_DEFAULT;
		}

		sig->param_count = nargs;
		/* TODO: Copy type ? */
		sig->ret = helper->return_type->type;
		for (i = 0; i < nargs; ++i)
			sig->params [i] = mono_type_array_get_and_resolve (helper->arguments, i);

		result = sig;
		*handle_class = NULL;
	} else if (strcmp (obj->vtable->klass->name, "DynamicMethod") == 0) {
		MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)obj;
		/* Already created by the managed code */
		g_assert (method->mhandle);
		result = method->mhandle;
		*handle_class = mono_defaults.methodhandle_class;
	} else if (strcmp (obj->vtable->klass->name, "GenericTypeParameterBuilder") == 0) {
		MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
		type = mono_class_inflate_generic_type (type, context);
		result = mono_class_from_mono_type (type);
		*handle_class = mono_defaults.typehandle_class;
		g_assert (result);
		mono_metadata_free_type (type);
	} else if (strcmp (obj->vtable->klass->name, "MonoGenericClass") == 0) {
		MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
		type = mono_class_inflate_generic_type (type, context);
		result = mono_class_from_mono_type (type);
		*handle_class = mono_defaults.typehandle_class;
		g_assert (result);
		mono_metadata_free_type (type);
	} else if (strcmp (obj->vtable->klass->name, "FieldOnTypeBuilderInst") == 0) {
		MonoReflectionFieldOnTypeBuilderInst *f = (MonoReflectionFieldOnTypeBuilderInst*)obj;
		MonoClass *inflated;
		MonoType *type;
		MonoClassField *field;

		if (is_sre_field_builder (mono_object_class (f->fb)))
			field = ((MonoReflectionFieldBuilder*)f->fb)->handle;
		else if (is_sr_mono_field (mono_object_class (f->fb)))
			field = ((MonoReflectionField*)f->fb)->field;
		else
			g_error ("resolve_object:: can't handle a FTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (f->fb)));

		type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)f->inst), context);
		inflated = mono_class_from_mono_type (type);

		result = field = mono_class_get_field_from_name (inflated, mono_field_get_name (field));
		ensure_complete_type (field->parent);
		g_assert (result);
		mono_metadata_free_type (type);
		*handle_class = mono_defaults.fieldhandle_class;
	} else if (strcmp (obj->vtable->klass->name, "ConstructorOnTypeBuilderInst") == 0) {
		MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)obj;
		MonoType *type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)c->inst), context);
		MonoClass *inflated_klass = mono_class_from_mono_type (type);
		MonoMethod *method;

		if (is_sre_ctor_builder (mono_object_class (c->cb)))
			method = ((MonoReflectionCtorBuilder *)c->cb)->mhandle;
		else if (is_sr_mono_cmethod (mono_object_class (c->cb)))
			method = ((MonoReflectionMethod *)c->cb)->method;
		else
			g_error ("resolve_object:: can't handle a CTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (c->cb)));

		result = inflate_mono_method (inflated_klass, method, (MonoObject*)c->cb);
		*handle_class = mono_defaults.methodhandle_class;
		mono_metadata_free_type (type);
	} else if (strcmp (obj->vtable->klass->name, "MethodOnTypeBuilderInst") == 0) {
		MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)obj;
		if (m->method_args) {
			result = mono_reflection_method_on_tb_inst_get_handle (m);
			if (context)
				result = mono_class_inflate_generic_method (result, context);
		} else {
			MonoType *type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)m->inst), context);
			MonoClass *inflated_klass = mono_class_from_mono_type (type);
			MonoMethod *method;

			if (is_sre_method_builder (mono_object_class (m->mb)))
				method = ((MonoReflectionMethodBuilder *)m->mb)->mhandle;
			else if (is_sr_mono_method (mono_object_class (m->mb)))
				method = ((MonoReflectionMethod *)m->mb)->method;
			else
				g_error ("resolve_object:: can't handle a MTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (m->mb)));

			result = inflate_mono_method (inflated_klass, method, (MonoObject*)m->mb);
			mono_metadata_free_type (type);
		}
		*handle_class = mono_defaults.methodhandle_class;
	} else if (strcmp (obj->vtable->klass->name, "MonoArrayMethod") == 0) {
		MonoReflectionArrayMethod *m = (MonoReflectionArrayMethod*)obj;
		MonoType *mtype;
		MonoClass *klass;
		MonoMethod *method;
		gpointer iter;
		char *name;

		mtype = mono_reflection_type_get_handle (m->parent);
		klass = mono_class_from_mono_type (mtype);

		/* Find the method */
		name = mono_string_to_utf8 (m->name);
		iter = NULL;
		while ((method = mono_class_get_methods (klass, &iter))) {
			if (!strcmp (method->name, name))
				break;
		}
		g_free (name);

		// FIXME:
		g_assert (method);
		// FIXME: Check parameters/return value etc. match

		result = method;
		*handle_class = mono_defaults.methodhandle_class;
	} else if (is_sre_array (mono_object_get_class(obj)) ||
				is_sre_byref (mono_object_get_class(obj)) ||
				is_sre_pointer (mono_object_get_class(obj))) {
		MonoReflectionType *ref_type = (MonoReflectionType *)obj;
		MonoType *type = mono_reflection_type_get_handle (ref_type);
		result = mono_class_from_mono_type (type);
		*handle_class = mono_defaults.typehandle_class;
	} else {
		g_print ("%s\n", obj->vtable->klass->name);
		g_assert_not_reached ();
	}
	return result;
}

#else /* DISABLE_REFLECTION_EMIT */

/* Stubs used when the runtime is built without System.Reflection.Emit. */

MonoArray*
mono_reflection_get_custom_attrs_blob (MonoReflectionAssembly *assembly, MonoObject *ctor, MonoArray *ctorArgs, MonoArray *properties, MonoArray *propValues, MonoArray *fields, MonoArray* fieldValues)
{
	g_assert_not_reached ();
	return NULL;
}

void
mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb)
{
	g_assert_not_reached ();
}

void
mono_reflection_setup_generic_class (MonoReflectionTypeBuilder *tb)
{
	g_assert_not_reached ();
}

void
mono_reflection_create_generic_class (MonoReflectionTypeBuilder *tb)
{
	g_assert_not_reached ();
}

void
mono_reflection_create_internal_class (MonoReflectionTypeBuilder *tb)
{
	g_assert_not_reached ();
}

void
mono_image_basic_init (MonoReflectionAssemblyBuilder *assemblyb)
{
	g_error ("This mono runtime was configured with --enable-minimal=reflection_emit, so System.Reflection.Emit is not supported.");
}

void
mono_image_module_basic_init (MonoReflectionModuleBuilder *moduleb)
{
	g_assert_not_reached ();
}

void
mono_image_set_wrappers_type (MonoReflectionModuleBuilder *moduleb, MonoReflectionType *type)
{
	g_assert_not_reached ();
}

MonoReflectionModule *
mono_image_load_module_dynamic (MonoReflectionAssemblyBuilder *ab, MonoString *fileName)
{
	g_assert_not_reached ();
	return NULL;
}

guint32
mono_image_insert_string (MonoReflectionModuleBuilder *module, MonoString *str)
{
	g_assert_not_reached ();
	return 0;
}

guint32
mono_image_create_method_token (MonoDynamicImage *assembly, MonoObject *obj, MonoArray *opt_param_types)
{
	g_assert_not_reached ();
	return 0;
}

guint32
mono_image_create_token (MonoDynamicImage *assembly, MonoObject *obj, gboolean create_open_instance, gboolean register_token)
{
	g_assert_not_reached ();
	return 0;
}

void
mono_image_register_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj)
{
}

void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *methods, MonoArray *ctors, MonoArray *fields, MonoArray *properties, MonoArray *events)
{
	g_assert_not_reached ();
}

void
mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides)
{
	*overrides = NULL;
	*num_overrides = 0;
}

MonoReflectionEvent *
mono_reflection_event_builder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb)
{
	g_assert_not_reached ();
	return NULL;
}

MonoReflectionType*
mono_reflection_create_runtime_class (MonoReflectionTypeBuilder *tb)
{
	g_assert_not_reached ();
	return NULL;
}

void
mono_reflection_initialize_generic_parameter (MonoReflectionGenericParam *gparam)
{
	g_assert_not_reached ();
}

MonoArray *
mono_reflection_sighelper_get_signature_local (MonoReflectionSigHelper *sig)
{
	g_assert_not_reached ();
	return NULL;
}

MonoArray *
mono_reflection_sighelper_get_signature_field (MonoReflectionSigHelper *sig)
{
	g_assert_not_reached ();
	return NULL;
}

void
mono_reflection_create_dynamic_method (MonoReflectionDynamicMethod *mb)
{
}

gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context)
{
	return NULL;
}

MonoType*
mono_reflection_type_get_handle (MonoReflectionType* ref)
{
	if (!ref)
		return NULL;
	return ref->type;
}

#endif /* DISABLE_REFLECTION_EMIT */

/* SECURITY_ACTION_* are defined in mono/metadata/tabledefs.h */
const static guint32 declsec_flags_map[] = {
	0x00000000,					/* empty */
	MONO_DECLSEC_FLAG_REQUEST,			/* SECURITY_ACTION_REQUEST			(x01) */
	MONO_DECLSEC_FLAG_DEMAND,			/* SECURITY_ACTION_DEMAND			(x02) */
	MONO_DECLSEC_FLAG_ASSERT,			/* SECURITY_ACTION_ASSERT			(x03) */
	MONO_DECLSEC_FLAG_DENY,				/* SECURITY_ACTION_DENY				(x04) */
	MONO_DECLSEC_FLAG_PERMITONLY,			/* SECURITY_ACTION_PERMITONLY			(x05) */
	MONO_DECLSEC_FLAG_LINKDEMAND,			/* SECURITY_ACTION_LINKDEMAND			(x06) */
	MONO_DECLSEC_FLAG_INHERITANCEDEMAND,		/* SECURITY_ACTION_INHERITANCEDEMAND		(x07) */
	MONO_DECLSEC_FLAG_REQUEST_MINIMUM,		/* SECURITY_ACTION_REQUEST_MINIMUM		(x08) */
	MONO_DECLSEC_FLAG_REQUEST_OPTIONAL,		/* SECURITY_ACTION_REQUEST_OPTIONAL		(x09) */
	MONO_DECLSEC_FLAG_REQUEST_REFUSE,		/* SECURITY_ACTION_REQUEST_REFUSE		(x0A) */
	MONO_DECLSEC_FLAG_PREJIT_GRANT,			/* SECURITY_ACTION_PREJIT_GRANT			(x0B) */
	MONO_DECLSEC_FLAG_PREJIT_DENY,			/* SECURITY_ACTION_PREJIT_DENY			(x0C) */
	MONO_DECLSEC_FLAG_NONCAS_DEMAND,		/* SECURITY_ACTION_NONCAS_DEMAND		(x0D) */
	MONO_DECLSEC_FLAG_NONCAS_LINKDEMAND,		/* SECURITY_ACTION_NONCAS_LINKDEMAND		(x0E) */
	MONO_DECLSEC_FLAG_NONCAS_INHERITANCEDEMAND,	/* SECURITY_ACTION_NONCAS_INHERITANCEDEMAND	(x0F) */
	MONO_DECLSEC_FLAG_LINKDEMAND_CHOICE,		/* SECURITY_ACTION_LINKDEMAND_CHOICE		(x10) */
	MONO_DECLSEC_FLAG_INHERITANCEDEMAND_CHOICE,	/* SECURITY_ACTION_INHERITANCEDEMAND_CHOICE	(x11) */
	MONO_DECLSEC_FLAG_DEMAND_CHOICE,		/* SECURITY_ACTION_DEMAND_CHOICE		(x12) */
};

/*
 * Returns flags that
includes all available security action associated to the handle.
 * @token: metadata token (either for a class or a method)
 * @image: image where resides the metadata.
 */
static guint32
mono_declsec_get_flags (MonoImage *image, guint32 token)
{
	int index = mono_metadata_declsec_from_index (image, token);
	MonoTableInfo *t = &image->tables [MONO_TABLE_DECLSECURITY];
	guint32 result = 0;
	guint32 action;
	int i;

	/* HasSecurity can be present for other, not specially encoded, attributes,
	   e.g. SuppressUnmanagedCodeSecurityAttribute */
	if (index < 0)
		return 0;

	/* Walk the DeclSecurity rows belonging to TOKEN (rows are grouped by
	 * parent) and OR together the flag bit of every security action. */
	for (i = index; i < t->rows; i++) {
		guint32 cols [MONO_DECL_SECURITY_SIZE];

		mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE);
		if (cols [MONO_DECL_SECURITY_PARENT] != token)
			break;

		action = cols [MONO_DECL_SECURITY_ACTION];
		if ((action >= MONO_DECLSEC_ACTION_MIN) && (action <= MONO_DECLSEC_ACTION_MAX)) {
			result |= declsec_flags_map [action];
		} else {
			g_assert_not_reached ();
		}
	}

	return result;
}

/*
 * Get the security actions (in the form of flags) associated with the specified method.
 *
 * @method: The method for which we want the declarative security flags.
 * Return the declarative security flags for the method (only).
 *
 * Note: To keep MonoMethod size down we do not cache the declarative security flags
 * (except for the stack modifiers which are kept in the MonoJitInfo structure)
 */
guint32
mono_declsec_flags_from_method (MonoMethod *method)
{
	if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
		/* FIXME: No cache (for the moment) */
		guint32 idx = mono_method_get_index (method);
		idx <<= MONO_HAS_DECL_SECURITY_BITS;
		idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
		return mono_declsec_get_flags (method->klass->image, idx);
	}
	return 0;
}

/*
 * Get the security actions (in the form of flags) associated with the specified class.
 *
 * @klass: The class for which we want the declarative security flags.
 * Return the declarative security flags for the class.
 *
 * Note: We cache the flags inside the MonoClass structure as this will get
 * called very often (at least for each method).
 */
guint32
mono_declsec_flags_from_class (MonoClass *klass)
{
	if (klass->flags & TYPE_ATTRIBUTE_HAS_SECURITY) {
		if (!klass->ext || !klass->ext->declsec_flags) {
			guint32 idx;

			idx = mono_metadata_token_index (klass->type_token);
			idx <<= MONO_HAS_DECL_SECURITY_BITS;
			idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
			mono_loader_lock ();
			mono_class_alloc_ext (klass);
			mono_loader_unlock ();
			/* we cache the flags on classes */
			klass->ext->declsec_flags = mono_declsec_get_flags (klass->image, idx);
		}
		return klass->ext->declsec_flags;
	}
	return 0;
}

/*
 * Get the security actions (in the form of flags) associated with the specified assembly.
 *
 * @assembly: The assembly for which we want the declarative security flags.
 * Return the declarative security flags for the assembly.
 */
guint32
mono_declsec_flags_from_assembly (MonoAssembly *assembly)
{
	guint32 idx = 1; /* there is only one assembly */
	idx <<= MONO_HAS_DECL_SECURITY_BITS;
	idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY;
	return mono_declsec_get_flags (assembly->image, idx);
}

/*
 * Fill actions for the specific index (which may either be an encoded class token or
 * an encoded method token) from the metadata image.
 * Returns TRUE if some actions requiring code generation are present, FALSE otherwise.
 */
static MonoBoolean
fill_actions_from_index (MonoImage *image, guint32 token, MonoDeclSecurityActions* actions,
	guint32 id_std, guint32 id_noncas, guint32 id_choice)
{
	MonoBoolean result = FALSE;
	MonoTableInfo *t;
	guint32 cols [MONO_DECL_SECURITY_SIZE];
	/* NOTE(review): unlike get_declsec_action () below, index is not checked
	 * for -1 here — presumably callers only reach this after the declsec
	 * flags indicated matching rows exist; confirm. */
	int index = mono_metadata_declsec_from_index (image, token);
	int i;

	t = &image->tables [MONO_TABLE_DECLSECURITY];
	for (i = index; i < t->rows; i++) {
		mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE);

		if (cols [MONO_DECL_SECURITY_PARENT] != token)
			return result;

		/* if present only replace (class) permissions with method permissions */
		/* if empty accept either class or method permissions */
		if (cols [MONO_DECL_SECURITY_ACTION] == id_std) {
			if (!actions->demand.blob) {
				const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
				actions->demand.index = cols [MONO_DECL_SECURITY_PERMISSIONSET];
				actions->demand.blob = (char*) (blob + 2);
				actions->demand.size = mono_metadata_decode_blob_size (blob, &blob);
				result = TRUE;
			}
		} else if (cols [MONO_DECL_SECURITY_ACTION] == id_noncas) {
			if (!actions->noncasdemand.blob) {
				const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
				actions->noncasdemand.index = cols [MONO_DECL_SECURITY_PERMISSIONSET];
				actions->noncasdemand.blob = (char*) (blob + 2);
				actions->noncasdemand.size = mono_metadata_decode_blob_size (blob, &blob);
				result = TRUE;
			}
		} else if (cols [MONO_DECL_SECURITY_ACTION] == id_choice) {
			if (!actions->demandchoice.blob) {
				const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
				actions->demandchoice.index = cols [MONO_DECL_SECURITY_PERMISSIONSET];
				actions->demandchoice.blob = (char*) (blob + 2);
				actions->demandchoice.size = mono_metadata_decode_blob_size (blob, &blob);
				result = TRUE;
			}
		}
	}

	return result;
}

/* Fill DEMANDS from the DeclSecurity rows attached to KLASS's typedef token. */
static MonoBoolean
mono_declsec_get_class_demands_params (MonoClass *klass, MonoDeclSecurityActions* demands,
	guint32 id_std, guint32 id_noncas, guint32 id_choice)
{
	guint32 idx = mono_metadata_token_index (klass->type_token);
	idx <<= MONO_HAS_DECL_SECURITY_BITS;
	idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
	return fill_actions_from_index (klass->image, idx, demands, id_std, id_noncas, id_choice);
}

/* Fill DEMANDS from the DeclSecurity rows attached to METHOD's methoddef token. */
static MonoBoolean
mono_declsec_get_method_demands_params (MonoMethod *method, MonoDeclSecurityActions* demands,
	guint32 id_std, guint32 id_noncas, guint32 id_choice)
{
	guint32 idx = mono_method_get_index (method);
	idx <<= MONO_HAS_DECL_SECURITY_BITS;
	idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
	return fill_actions_from_index (method->klass->image, idx, demands, id_std, id_noncas, id_choice);
}

/*
 * Collect all actions (that requires to generate code in mini) assigned for
 * the specified method.
 * Note: Don't use the content of actions if the function return FALSE.
 */
MonoBoolean
mono_declsec_get_demands (MonoMethod *method, MonoDeclSecurityActions* demands)
{
	guint32 mask = MONO_DECLSEC_FLAG_DEMAND | MONO_DECLSEC_FLAG_NONCAS_DEMAND |
		MONO_DECLSEC_FLAG_DEMAND_CHOICE;
	MonoBoolean result = FALSE;
	guint32 flags;

	/* quick exit if no declarative security is present in the metadata */
	if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
		return FALSE;

	/* we want the original as the wrapper is "free" of the security informations */
	if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) {
		method = mono_marshal_method_from_wrapper (method);
		if (!method)
			return FALSE;
	}

	/* First we look for method-level attributes */
	if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
		mono_class_init (method->klass);
		memset (demands, 0, sizeof (MonoDeclSecurityActions));

		result = mono_declsec_get_method_demands_params (method, demands,
			SECURITY_ACTION_DEMAND, SECURITY_ACTION_NONCASDEMAND, SECURITY_ACTION_DEMANDCHOICE);
	}

	/* Here we use (or create) the class declarative cache to look for demands */
	flags = mono_declsec_flags_from_class (method->klass);
	if (flags & mask) {
		if (!result) {
			mono_class_init (method->klass);
			memset (demands, 0, sizeof (MonoDeclSecurityActions));
		}
		result |= mono_declsec_get_class_demands_params (method->klass, demands,
			SECURITY_ACTION_DEMAND, SECURITY_ACTION_NONCASDEMAND, SECURITY_ACTION_DEMANDCHOICE);
	}

	/* The boolean return value is used as a shortcut in case nothing needs to
	   be generated (e.g. LinkDemand[Choice] and InheritanceDemand[Choice]) */
	return result;
}

/*
 * Collect all Link actions: LinkDemand, NonCasLinkDemand and LinkDemandChoice (2.0).
 *
 * Note: Don't use the content of actions if the function return FALSE.
 */
MonoBoolean
mono_declsec_get_linkdemands (MonoMethod *method, MonoDeclSecurityActions* klass, MonoDeclSecurityActions *cmethod)
{
	MonoBoolean result = FALSE;
	guint32 flags;

	/* quick exit if no declarative security is present in the metadata */
	if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
		return FALSE;

	/* we want the original as the wrapper is "free" of the security informations */
	if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) {
		method = mono_marshal_method_from_wrapper (method);
		if (!method)
			return FALSE;
	}

	/* results are independant - zeroize both */
	memset (cmethod, 0, sizeof (MonoDeclSecurityActions));
	memset (klass, 0, sizeof (MonoDeclSecurityActions));

	/* First we look for method-level attributes */
	if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
		mono_class_init (method->klass);

		result = mono_declsec_get_method_demands_params (method, cmethod,
			SECURITY_ACTION_LINKDEMAND, SECURITY_ACTION_NONCASLINKDEMAND, SECURITY_ACTION_LINKDEMANDCHOICE);
	}

	/* Here we use (or create) the class declarative cache to look for demands */
	flags = mono_declsec_flags_from_class (method->klass);
	if (flags & (MONO_DECLSEC_FLAG_LINKDEMAND | MONO_DECLSEC_FLAG_NONCAS_LINKDEMAND | MONO_DECLSEC_FLAG_LINKDEMAND_CHOICE)) {
		mono_class_init (method->klass);

		result |= mono_declsec_get_class_demands_params (method->klass, klass,
			SECURITY_ACTION_LINKDEMAND, SECURITY_ACTION_NONCASLINKDEMAND, SECURITY_ACTION_LINKDEMANDCHOICE);
	}

	return result;
}

/*
 * Collect all Inherit actions: InheritanceDemand, NonCasInheritanceDemand and InheritanceDemandChoice (2.0).
 *
 * @klass	The inherited class - this is the class that provides the security check (attributes)
 * @demands	Filled with the collected actions on success.
 * return TRUE if inheritance demands (any kind) are present, FALSE otherwise.
 *
 * Note: Don't use the content of actions if the function return FALSE.
 */
MonoBoolean
mono_declsec_get_inheritdemands_class (MonoClass *klass, MonoDeclSecurityActions* demands)
{
	MonoBoolean result = FALSE;
	guint32 flags;

	/* quick exit if no declarative security is present in the metadata */
	if (!klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
		return FALSE;

	/* Here we use (or create) the class declarative cache to look for demands */
	flags = mono_declsec_flags_from_class (klass);
	if (flags & (MONO_DECLSEC_FLAG_INHERITANCEDEMAND | MONO_DECLSEC_FLAG_NONCAS_INHERITANCEDEMAND | MONO_DECLSEC_FLAG_INHERITANCEDEMAND_CHOICE)) {
		mono_class_init (klass);
		memset (demands, 0, sizeof (MonoDeclSecurityActions));

		result |= mono_declsec_get_class_demands_params (klass, demands,
			SECURITY_ACTION_INHERITDEMAND, SECURITY_ACTION_NONCASINHERITANCE, SECURITY_ACTION_INHERITDEMANDCHOICE);
	}

	return result;
}

/*
 * Collect all Inherit actions: InheritanceDemand, NonCasInheritanceDemand and InheritanceDemandChoice (2.0).
 *
 * Note: Don't use the content of actions if the function return FALSE.
*/ MonoBoolean mono_declsec_get_inheritdemands_method (MonoMethod *method, MonoDeclSecurityActions* demands) { /* quick exit if no declarative security is present in the metadata */ if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows) return FALSE; /* we want the original as the wrapper is "free" of the security informations */ if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) { method = mono_marshal_method_from_wrapper (method); if (!method) return FALSE; } if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) { mono_class_init (method->klass); memset (demands, 0, sizeof (MonoDeclSecurityActions)); return mono_declsec_get_method_demands_params (method, demands, SECURITY_ACTION_INHERITDEMAND, SECURITY_ACTION_NONCASINHERITANCE, SECURITY_ACTION_INHERITDEMANDCHOICE); } return FALSE; } static MonoBoolean get_declsec_action (MonoImage *image, guint32 token, guint32 action, MonoDeclSecurityEntry *entry) { guint32 cols [MONO_DECL_SECURITY_SIZE]; MonoTableInfo *t; int i; int index = mono_metadata_declsec_from_index (image, token); if (index == -1) return FALSE; t = &image->tables [MONO_TABLE_DECLSECURITY]; for (i = index; i < t->rows; i++) { mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE); /* shortcut - index are ordered */ if (token != cols [MONO_DECL_SECURITY_PARENT]) return FALSE; if (cols [MONO_DECL_SECURITY_ACTION] == action) { const char *metadata = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]); entry->blob = (char*) (metadata + 2); entry->size = mono_metadata_decode_blob_size (metadata, &metadata); return TRUE; } } return FALSE; } MonoBoolean mono_declsec_get_method_action (MonoMethod *method, guint32 action, MonoDeclSecurityEntry *entry) { if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) { guint32 idx = mono_method_get_index (method); idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_METHODDEF; return get_declsec_action 
(method->klass->image, idx, action, entry); } return FALSE; } MonoBoolean mono_declsec_get_class_action (MonoClass *klass, guint32 action, MonoDeclSecurityEntry *entry) { /* use cache */ guint32 flags = mono_declsec_flags_from_class (klass); if (declsec_flags_map [action] & flags) { guint32 idx = mono_metadata_token_index (klass->type_token); idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_TYPEDEF; return get_declsec_action (klass->image, idx, action, entry); } return FALSE; } MonoBoolean mono_declsec_get_assembly_action (MonoAssembly *assembly, guint32 action, MonoDeclSecurityEntry *entry) { guint32 idx = 1; /* there is only one assembly */ idx <<= MONO_HAS_DECL_SECURITY_BITS; idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY; return get_declsec_action (assembly->image, idx, action, entry); } gboolean mono_reflection_call_is_assignable_to (MonoClass *klass, MonoClass *oklass) { MonoObject *res, *exc; void *params [1]; static MonoClass *System_Reflection_Emit_TypeBuilder = NULL; static MonoMethod *method = NULL; if (!System_Reflection_Emit_TypeBuilder) { System_Reflection_Emit_TypeBuilder = mono_class_from_name (mono_defaults.corlib, "System.Reflection.Emit", "TypeBuilder"); g_assert (System_Reflection_Emit_TypeBuilder); } if (method == NULL) { method = mono_class_get_method_from_name (System_Reflection_Emit_TypeBuilder, "IsAssignableTo", 1); g_assert (method); } /* * The result of mono_type_get_object () might be a System.MonoType but we * need a TypeBuilder so use mono_class_get_ref_info (klass). */ g_assert (mono_class_get_ref_info (klass)); g_assert (!strcmp (((MonoObject*)(mono_class_get_ref_info (klass)))->vtable->klass->name, "TypeBuilder")); params [0] = mono_type_get_object (mono_domain_get (), &oklass->byval_arg); res = mono_runtime_invoke (method, (MonoObject*)(mono_class_get_ref_info (klass)), params, &exc); if (exc) return FALSE; else return *(MonoBoolean*)mono_object_unbox (res); }
./CrossVul/dataset_final_sorted/CWE-20/c/good_4718_0
crossvul-cpp_data_bad_2513_0
/* * Copyright 2014, Red Hat, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ /* * A daemon that supports a simplified interface for writing TCMU * handlers. */ #define _GNU_SOURCE #define _BITS_UIO_H #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <errno.h> #include <dirent.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/mman.h> #include <assert.h> #include <dlfcn.h> #include <pthread.h> #include <signal.h> #include <glib.h> #include <glib-unix.h> #include <gio/gio.h> #include <getopt.h> #include <poll.h> #include <scsi/scsi.h> #include <libkmod.h> #include <sys/utsname.h> #include "target_core_user_local.h" #include "darray.h" #include "tcmu-runner.h" #include "tcmur_aio.h" #include "tcmur_device.h" #include "tcmur_cmd_handler.h" #include "libtcmu.h" #include "tcmuhandler-generated.h" #include "version.h" #include "libtcmu_config.h" #include "libtcmu_log.h" static char *handler_path = DEFAULT_HANDLER_PATH; /* tcmu log dir path */ extern char *tcmu_log_dir; static struct tcmu_config *tcmu_cfg; darray(struct tcmur_handler *) g_runner_handlers = darray_new(); static struct tcmur_handler *find_handler_by_subtype(gchar *subtype) { struct tcmur_handler **handler; darray_foreach(handler, g_runner_handlers) { if (strcmp((*handler)->subtype, subtype) == 0) return *handler; } return NULL; } int tcmur_register_handler(struct tcmur_handler *handler) { struct tcmur_handler *h; int i; for (i = 0; i < darray_size(g_runner_handlers); i++) { h = 
darray_item(g_runner_handlers, i); if (!strcmp(h->subtype, handler->subtype)) { tcmu_err("Handler %s has already been registered\n", handler->subtype); return -1; } } darray_append(g_runner_handlers, handler); return 0; } bool tcmur_unregister_handler(struct tcmur_handler *handler) { int i; for (i = 0; i < darray_size(g_runner_handlers); i++) { if (darray_item(g_runner_handlers, i) == handler) { darray_remove(g_runner_handlers, i); return true; } } return false; } static int is_handler(const struct dirent *dirent) { if (strncmp(dirent->d_name, "handler_", 8)) return 0; return 1; } static int open_handlers(void) { struct dirent **dirent_list; int num_handlers; int num_good = 0; int i; num_handlers = scandir(handler_path, &dirent_list, is_handler, alphasort); if (num_handlers == -1) return -1; for (i = 0; i < num_handlers; i++) { char *path; void *handle; int (*handler_init)(void); int ret; ret = asprintf(&path, "%s/%s", handler_path, dirent_list[i]->d_name); if (ret == -1) { tcmu_err("ENOMEM\n"); continue; } handle = dlopen(path, RTLD_NOW|RTLD_LOCAL); if (!handle) { tcmu_err("Could not open handler at %s: %s\n", path, dlerror()); free(path); continue; } handler_init = dlsym(handle, "handler_init"); if (!handler_init) { tcmu_err("dlsym failure on %s\n", path); free(path); continue; } ret = handler_init(); free(path); if (ret == 0) num_good++; } for (i = 0; i < num_handlers; i++) free(dirent_list[i]); free(dirent_list); return num_good; } static gboolean sighandler(gpointer user_data) { tcmulib_cleanup_all_cmdproc_threads(); tcmu_cancel_log_thread(); tcmu_cancel_config_thread(tcmu_cfg); g_main_loop_quit((GMainLoop*)user_data); return G_SOURCE_CONTINUE; } gboolean tcmulib_callback(GIOChannel *source, GIOCondition condition, gpointer data) { struct tcmulib_context *ctx = data; tcmulib_master_fd_ready(ctx); return TRUE; } static GDBusObjectManagerServer *manager = NULL; static gboolean on_check_config(TCMUService1 *interface, GDBusMethodInvocation *invocation, gchar 
*cfgstring, gpointer user_data) { struct tcmur_handler *handler = user_data; char *reason = NULL; bool str_ok = true; if (handler->check_config) str_ok = handler->check_config(cfgstring, &reason); if (str_ok) reason = "success"; g_dbus_method_invocation_return_value(invocation, g_variant_new("(bs)", str_ok, reason ? : "unknown")); if (!str_ok) free(reason); return TRUE; } static void dbus_export_handler(struct tcmur_handler *handler, GCallback check_config) { GDBusObjectSkeleton *object; char obj_name[128]; TCMUService1 *interface; snprintf(obj_name, sizeof(obj_name), "/org/kernel/TCMUService1/%s", handler->subtype); object = g_dbus_object_skeleton_new(obj_name); interface = tcmuservice1_skeleton_new(); g_dbus_object_skeleton_add_interface(object, G_DBUS_INTERFACE_SKELETON(interface)); g_signal_connect(interface, "handle-check-config", check_config, handler); /* user_data */ tcmuservice1_set_config_desc(interface, handler->cfg_desc); g_dbus_object_manager_server_export(manager, G_DBUS_OBJECT_SKELETON(object)); g_object_unref(object); } static bool dbus_unexport_handler(struct tcmur_handler *handler) { char obj_name[128]; snprintf(obj_name, sizeof(obj_name), "/org/kernel/TCMUService1/%s", handler->subtype); return g_dbus_object_manager_server_unexport(manager, obj_name) == TRUE; } struct dbus_info { guint watcher_id; /* The RegisterHandler invocation on * org.kernel.TCMUService1.HandlerManager1 interface. */ GDBusMethodInvocation *register_invocation; /* Connection to the handler's bus_name. 
*/ GDBusConnection *connection; }; static int dbus_handler_open(struct tcmu_device *dev) { return -1; } static void dbus_handler_close(struct tcmu_device *dev) { /* nop */ } static int dbus_handler_handle_cmd(struct tcmu_device *dev, struct tcmulib_cmd *cmd) { abort(); } static gboolean on_dbus_check_config(TCMUService1 *interface, GDBusMethodInvocation *invocation, gchar *cfgstring, gpointer user_data) { char *bus_name, *obj_name; struct tcmur_handler *handler = user_data; GDBusConnection *connection; GError *error = NULL; GVariant *result; bus_name = g_strdup_printf("org.kernel.TCMUService1.HandlerManager1.%s", handler->subtype); obj_name = g_strdup_printf("/org/kernel/TCMUService1/HandlerManager1/%s", handler->subtype); connection = g_dbus_method_invocation_get_connection(invocation); result = g_dbus_connection_call_sync(connection, bus_name, obj_name, "org.kernel.TCMUService1", "CheckConfig", g_variant_new("(s)", cfgstring), NULL, G_DBUS_CALL_FLAGS_NONE, -1, NULL, &error); if (result) g_dbus_method_invocation_return_value(invocation, result); else g_dbus_method_invocation_return_value(invocation, g_variant_new("(bs)", FALSE, error->message)); g_free(bus_name); g_free(obj_name); return TRUE; } static void on_handler_appeared(GDBusConnection *connection, const gchar *name, const gchar *name_owner, gpointer user_data) { struct tcmur_handler *handler = user_data; struct dbus_info *info = handler->opaque; if (info->register_invocation) { info->connection = connection; tcmur_register_handler(handler); dbus_export_handler(handler, G_CALLBACK(on_dbus_check_config)); g_dbus_method_invocation_return_value(info->register_invocation, g_variant_new("(bs)", TRUE, "succeeded")); info->register_invocation = NULL; } } static void on_handler_vanished(GDBusConnection *connection, const gchar *name, gpointer user_data) { struct tcmur_handler *handler = user_data; struct dbus_info *info = handler->opaque; if (info->register_invocation) { char *reason; reason = 
g_strdup_printf("Cannot find handler bus name: " "org.kernel.TCMUService1.HandlerManager1.%s", handler->subtype); g_dbus_method_invocation_return_value(info->register_invocation, g_variant_new("(bs)", FALSE, reason)); g_free(reason); } tcmur_unregister_handler(handler); dbus_unexport_handler(handler); } static gboolean on_register_handler(TCMUService1HandlerManager1 *interface, GDBusMethodInvocation *invocation, gchar *subtype, gchar *cfg_desc, gpointer user_data) { struct tcmur_handler *handler; struct dbus_info *info; char *bus_name; bus_name = g_strdup_printf("org.kernel.TCMUService1.HandlerManager1.%s", subtype); handler = g_new0(struct tcmur_handler, 1); handler->subtype = g_strdup(subtype); handler->cfg_desc = g_strdup(cfg_desc); handler->open = dbus_handler_open; handler->close = dbus_handler_close; handler->handle_cmd = dbus_handler_handle_cmd; info = g_new0(struct dbus_info, 1); info->register_invocation = invocation; info->watcher_id = g_bus_watch_name(G_BUS_TYPE_SYSTEM, bus_name, G_BUS_NAME_WATCHER_FLAGS_NONE, on_handler_appeared, on_handler_vanished, handler, NULL); g_free(bus_name); handler->opaque = info; return TRUE; } static gboolean on_unregister_handler(TCMUService1HandlerManager1 *interface, GDBusMethodInvocation *invocation, gchar *subtype, gpointer user_data) { struct tcmur_handler *handler = find_handler_by_subtype(subtype); struct dbus_info *info = handler->opaque; if (!handler) { g_dbus_method_invocation_return_value(invocation, g_variant_new("(bs)", FALSE, "unknown subtype")); return TRUE; } dbus_unexport_handler(handler); tcmur_unregister_handler(handler); g_bus_unwatch_name(info->watcher_id); g_free(info); g_free(handler); g_dbus_method_invocation_return_value(invocation, g_variant_new("(bs)", TRUE, "succeeded")); return TRUE; } void dbus_handler_manager1_init(GDBusConnection *connection) { GError *error = NULL; TCMUService1HandlerManager1 *interface; gboolean ret; interface = tcmuservice1_handler_manager1_skeleton_new(); ret = 
g_dbus_interface_skeleton_export( G_DBUS_INTERFACE_SKELETON(interface), connection, "/org/kernel/TCMUService1/HandlerManager1", &error); g_signal_connect(interface, "handle-register-handler", G_CALLBACK (on_register_handler), NULL); g_signal_connect(interface, "handle-unregister-handler", G_CALLBACK (on_unregister_handler), NULL); if (!ret) tcmu_err("Handler manager export failed: %s\n", error ? error->message : "unknown error"); if (error) g_error_free(error); } static void dbus_bus_acquired(GDBusConnection *connection, const gchar *name, gpointer user_data) { struct tcmur_handler **handler; tcmu_dbg("bus %s acquired\n", name); manager = g_dbus_object_manager_server_new("/org/kernel/TCMUService1"); darray_foreach(handler, g_runner_handlers) { dbus_export_handler(*handler, G_CALLBACK(on_check_config)); } dbus_handler_manager1_init(connection); g_dbus_object_manager_server_set_connection(manager, connection); } static void dbus_name_acquired(GDBusConnection *connection, const gchar *name, gpointer user_data) { tcmu_dbg("name %s acquired\n", name); } static void dbus_name_lost(GDBusConnection *connection, const gchar *name, gpointer user_data) { tcmu_dbg("name lost\n"); } static int load_our_module(void) { struct kmod_list *list = NULL, *itr; struct kmod_ctx *ctx; struct stat sb; struct utsname u; int ret; ctx = kmod_new(NULL, NULL); if (!ctx) { tcmu_err("kmod_new() failed: %m\n"); return -1; } ret = kmod_module_new_from_lookup(ctx, "target_core_user", &list); if (ret < 0) { /* In some environments like containers, /lib/modules/`uname -r` * will not exist, in such cases the load module job be taken * care by admin, either by manual load or makesure it's builtin */ if (ENOENT == errno) { if (uname(&u) < 0) { tcmu_err("uname() failed: %m\n"); } else { tcmu_info("no modules directory '/lib/modules/%s', checking module target_core_user entry in '/sys/modules/'\n", u.release); ret = stat("/sys/module/target_core_user", &sb); if (!ret) { tcmu_dbg("Module target_core_user 
already loaded\n"); } else { tcmu_err("stat() on '/sys/module/target_core_user' failed: %m\n"); } } } else { tcmu_err("kmod_module_new_from_lookup() failed to lookup alias target_core_use %m\n"); } kmod_unref(ctx); return ret; } if (!list) { tcmu_err("kmod_module_new_from_lookup() failed to find module target_core_user\n"); kmod_unref(ctx); return -ENOENT; } kmod_list_foreach(itr, list) { int state, err; struct kmod_module *mod = kmod_module_get_module(itr); state = kmod_module_get_initstate(mod); switch (state) { case KMOD_MODULE_BUILTIN: tcmu_info("Module '%s' is builtin\n", kmod_module_get_name(mod)); break; case KMOD_MODULE_LIVE: tcmu_dbg("Module '%s' is already loaded\n", kmod_module_get_name(mod)); break; default: err = kmod_module_probe_insert_module(mod, KMOD_PROBE_APPLY_BLACKLIST, NULL, NULL, NULL, NULL); if (err == 0) { tcmu_info("Inserted module '%s'\n", kmod_module_get_name(mod)); } else if (err == KMOD_PROBE_APPLY_BLACKLIST) { tcmu_err("Module '%s' is blacklisted\n", kmod_module_get_name(mod)); } else { tcmu_err("Failed to insert '%s'\n", kmod_module_get_name(mod)); } ret = err; } kmod_module_unref(mod); } kmod_module_unref_list(list); kmod_unref(ctx); return ret; } static void cmdproc_thread_cleanup(void *arg) { struct tcmu_device *dev = arg; struct tcmur_handler *rhandler = tcmu_get_runner_handler(dev); rhandler->close(dev); } static void *tcmur_cmdproc_thread(void *arg) { struct tcmu_device *dev = arg; struct tcmur_handler *rhandler = tcmu_get_runner_handler(dev); struct pollfd pfd; int ret; pthread_cleanup_push(cmdproc_thread_cleanup, dev); while (1) { int completed = 0; struct tcmulib_cmd *cmd; tcmulib_processing_start(dev); while ((cmd = tcmulib_get_next_command(dev)) != NULL) { if (tcmu_get_log_level() == TCMU_LOG_DEBUG_SCSI_CMD) tcmu_cdb_debug_info(cmd); if (tcmur_handler_is_passthrough_only(rhandler)) ret = tcmur_cmd_passthrough_handler(dev, cmd); else ret = tcmur_generic_handle_cmd(dev, cmd); if (ret == TCMU_NOT_HANDLED) tcmu_warn("Command 
0x%x not supported\n", cmd->cdb[0]); /* * command (processing) completion is called in the following * scenarios: * - handle_cmd: synchronous handlers * - generic_handle_cmd: non tcmur handler calls (see generic_cmd()) * and on errors when calling tcmur handler. */ if (ret != TCMU_ASYNC_HANDLED) { completed = 1; tcmur_command_complete(dev, cmd, ret); } } if (completed) tcmulib_processing_complete(dev); pfd.fd = tcmu_get_dev_fd(dev); pfd.events = POLLIN; pfd.revents = 0; poll(&pfd, 1, -1); if (pfd.revents != POLLIN) { tcmu_err("poll received unexpected revent: 0x%x\n", pfd.revents); break; } } tcmu_err("thread terminating, should never happen\n"); pthread_cleanup_pop(1); return NULL; } static int dev_added(struct tcmu_device *dev) { struct tcmur_handler *rhandler = tcmu_get_runner_handler(dev); struct tcmur_device *rdev; int32_t block_size, max_sectors; int64_t dev_size; int ret; rdev = calloc(1, sizeof(*rdev)); if (!rdev) return -ENOMEM; tcmu_set_daemon_dev_private(dev, rdev); ret = -EINVAL; block_size = tcmu_get_attribute(dev, "hw_block_size"); if (block_size <= 0) { tcmu_dev_err(dev, "Could not get hw_block_size\n"); goto free_rdev; } tcmu_set_dev_block_size(dev, block_size); dev_size = tcmu_get_device_size(dev); if (dev_size < 0) { tcmu_dev_err(dev, "Could not get device size\n"); goto free_rdev; } tcmu_set_dev_num_lbas(dev, dev_size / block_size); max_sectors = tcmu_get_attribute(dev, "hw_max_sectors"); if (max_sectors < 0) goto free_rdev; tcmu_set_dev_max_xfer_len(dev, max_sectors); tcmu_dev_dbg(dev, "Got block_size %ld, size in bytes %lld\n", block_size, dev_size); ret = pthread_spin_init(&rdev->lock, 0); if (ret != 0) goto free_rdev; ret = pthread_mutex_init(&rdev->caw_lock, NULL); if (ret != 0) goto cleanup_dev_lock; ret = pthread_mutex_init(&rdev->format_lock, NULL); if (ret != 0) goto cleanup_caw_lock; ret = setup_io_work_queue(dev); if (ret < 0) goto cleanup_format_lock; ret = setup_aio_tracking(rdev); if (ret < 0) goto cleanup_io_work_queue; ret = 
rhandler->open(dev); if (ret) goto cleanup_aio_tracking; ret = tcmulib_start_cmdproc_thread(dev, tcmur_cmdproc_thread); if (ret < 0) goto close_dev; return 0; close_dev: rhandler->close(dev); cleanup_aio_tracking: cleanup_aio_tracking(rdev); cleanup_io_work_queue: cleanup_io_work_queue(dev, true); cleanup_format_lock: pthread_mutex_destroy(&rdev->format_lock); cleanup_caw_lock: pthread_mutex_destroy(&rdev->caw_lock); cleanup_dev_lock: pthread_spin_destroy(&rdev->lock); free_rdev: free(rdev); return ret; } static void dev_removed(struct tcmu_device *dev) { struct tcmur_device *rdev = tcmu_get_daemon_dev_private(dev); int ret; /* * The order of cleaning up worker threads and calling ->removed() * is important: for sync handlers, the worker thread needs to be * terminated before removing the handler (i.e., calling handlers * ->close() callout) in order to ensure that no handler callouts * are getting invoked when shutting down the handler. */ cleanup_io_work_queue_threads(dev); tcmulib_cleanup_cmdproc_thread(dev); cleanup_io_work_queue(dev, false); cleanup_aio_tracking(rdev); ret = pthread_mutex_destroy(&rdev->format_lock); if (ret != 0) tcmu_err("could not cleanup format lock %d\n", ret); ret = pthread_mutex_destroy(&rdev->caw_lock); if (ret != 0) tcmu_err("could not cleanup caw lock %d\n", ret); ret = pthread_spin_destroy(&rdev->lock); if (ret != 0) tcmu_err("could not cleanup mailbox lock %d\n", ret); free(rdev); } static bool tcmu_logdir_create(const char *path) { DIR* dir = opendir(path); if (dir) { closedir(dir); } else if (errno == ENOENT) { if (mkdir(path, 0755) == -1) { tcmu_err("mkdir(%s) failed: %m\n", path); return FALSE; } } else { tcmu_err("opendir(%s) failed: %m\n", path); return FALSE; } return TRUE; } static void usage(void) { printf("\nusage:\n"); printf("\ttcmu-runner [options]\n"); printf("\noptions:\n"); printf("\t-h, --help: print this message and exit\n"); printf("\t-V, --version: print version and exit\n"); printf("\t-d, --debug: enable debug 
messages\n"); printf("\t--handler-path: set path to search for handler modules\n"); printf("\t\tdefault is %s\n", DEFAULT_HANDLER_PATH); printf("\t-l, --tcmu-log-dir: tcmu log dir\n"); printf("\t\tdefault is %s\n", TCMU_LOG_DIR_DEFAULT); printf("\n"); } static struct option long_options[] = { {"debug", no_argument, 0, 'd'}, {"handler-path", required_argument, 0, 0}, {"tcmu-log-dir", required_argument, 0, 'l'}, {"help", no_argument, 0, 'h'}, {"version", no_argument, 0, 'V'}, {0, 0, 0, 0}, }; int main(int argc, char **argv) { darray(struct tcmulib_handler) handlers = darray_new(); struct tcmulib_context *tcmulib_context; struct tcmur_handler **tmp_r_handler; GMainLoop *loop; GIOChannel *libtcmu_gio; guint reg_id; int ret; tcmu_cfg = tcmu_config_new(); if (!tcmu_cfg) exit(1); ret = tcmu_load_config(tcmu_cfg, NULL); if (ret == -1) goto err_out; while (1) { int option_index = 0; int c; c = getopt_long(argc, argv, "dhlV", long_options, &option_index); if (c == -1) break; switch (c) { case 0: if (option_index == 1) handler_path = strdup(optarg); break; case 'l': if (strlen(optarg) > PATH_MAX - TCMU_LOG_FILENAME_MAX) { tcmu_err("--tcmu-log-dir='%s' cannot exceed %d characters\n", optarg, PATH_MAX - TCMU_LOG_FILENAME_MAX); } if (!tcmu_logdir_create(optarg)) { goto err_out; } tcmu_log_dir = strdup(optarg); break; case 'd': tcmu_set_log_level(TCMU_CONF_LOG_DEBUG_SCSI_CMD); break; case 'V': printf("tcmu-runner %s\n", TCMUR_VERSION); goto err_out; default: case 'h': usage(); goto err_out; } } tcmu_dbg("handler path: %s\n", handler_path); ret = load_our_module(); if (ret < 0) { tcmu_err("couldn't load module\n"); goto err_out; } ret = open_handlers(); if (ret < 0) { tcmu_err("couldn't open handlers\n"); goto err_out; } tcmu_dbg("%d runner handlers found\n", ret); /* * Convert from tcmu-runner's handler struct to libtcmu's * handler struct, an array of which we pass in, below. 
*/ darray_foreach(tmp_r_handler, g_runner_handlers) { struct tcmulib_handler tmp_handler; tmp_handler.name = (*tmp_r_handler)->name; tmp_handler.subtype = (*tmp_r_handler)->subtype; tmp_handler.cfg_desc = (*tmp_r_handler)->cfg_desc; tmp_handler.check_config = (*tmp_r_handler)->check_config; tmp_handler.reconfig = (*tmp_r_handler)->reconfig; tmp_handler.added = dev_added; tmp_handler.removed = dev_removed; /* * Can hand out a ref to an internal pointer to the * darray b/c handlers will never be added or removed * once open_handlers() is done. */ tmp_handler.hm_private = *tmp_r_handler; darray_append(handlers, tmp_handler); } tcmulib_context = tcmulib_initialize(handlers.item, handlers.size); if (!tcmulib_context) { tcmu_err("tcmulib_initialize failed\n"); goto err_out; } loop = g_main_loop_new(NULL, FALSE); if (g_unix_signal_add(SIGINT, sighandler, loop) <= 0 || g_unix_signal_add(SIGTERM, sighandler, loop) <= 0) { tcmu_err("couldn't setup signal handlers\n"); goto err_tcmulib_close; } /* Set up event for libtcmu */ libtcmu_gio = g_io_channel_unix_new(tcmulib_get_master_fd(tcmulib_context)); g_io_add_watch(libtcmu_gio, G_IO_IN, tcmulib_callback, tcmulib_context); /* Set up DBus name, see callback */ reg_id = g_bus_own_name(G_BUS_TYPE_SYSTEM, "org.kernel.TCMUService1", G_BUS_NAME_OWNER_FLAGS_NONE, dbus_bus_acquired, dbus_name_acquired, // name acquired dbus_name_lost, // name lost NULL, // user data NULL // user date free func ); g_main_loop_run(loop); tcmu_dbg("Exiting...\n"); g_bus_unown_name(reg_id); g_main_loop_unref(loop); tcmulib_close(tcmulib_context); tcmu_config_destroy(tcmu_cfg); return 0; err_tcmulib_close: tcmulib_close(tcmulib_context); err_out: tcmu_config_destroy(tcmu_cfg); exit(1); }
./CrossVul/dataset_final_sorted/CWE-20/c/bad_2513_0
crossvul-cpp_data_good_5586_0
#include <linux/mutex.h> #include <linux/socket.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <net/net_namespace.h> #include <linux/module.h> #include <net/sock.h> #include <linux/inet_diag.h> #include <linux/sock_diag.h> static const struct sock_diag_handler *sock_diag_handlers[AF_MAX]; static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); static DEFINE_MUTEX(sock_diag_table_mutex); int sock_diag_check_cookie(void *sk, __u32 *cookie) { if ((cookie[0] != INET_DIAG_NOCOOKIE || cookie[1] != INET_DIAG_NOCOOKIE) && ((u32)(unsigned long)sk != cookie[0] || (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1])) return -ESTALE; else return 0; } EXPORT_SYMBOL_GPL(sock_diag_check_cookie); void sock_diag_save_cookie(void *sk, __u32 *cookie) { cookie[0] = (u32)(unsigned long)sk; cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1); } EXPORT_SYMBOL_GPL(sock_diag_save_cookie); int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype) { u32 mem[SK_MEMINFO_VARS]; mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf; mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf; mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len; return nla_put(skb, attrtype, sizeof(mem), &mem); } EXPORT_SYMBOL_GPL(sock_diag_put_meminfo); void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)) { mutex_lock(&sock_diag_table_mutex); inet_rcv_compat = fn; mutex_unlock(&sock_diag_table_mutex); } EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat); void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)) { mutex_lock(&sock_diag_table_mutex); inet_rcv_compat = NULL; mutex_unlock(&sock_diag_table_mutex); } EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat); int 
sock_diag_register(const struct sock_diag_handler *hndl) { int err = 0; if (hndl->family >= AF_MAX) return -EINVAL; mutex_lock(&sock_diag_table_mutex); if (sock_diag_handlers[hndl->family]) err = -EBUSY; else sock_diag_handlers[hndl->family] = hndl; mutex_unlock(&sock_diag_table_mutex); return err; } EXPORT_SYMBOL_GPL(sock_diag_register); void sock_diag_unregister(const struct sock_diag_handler *hnld) { int family = hnld->family; if (family >= AF_MAX) return; mutex_lock(&sock_diag_table_mutex); BUG_ON(sock_diag_handlers[family] != hnld); sock_diag_handlers[family] = NULL; mutex_unlock(&sock_diag_table_mutex); } EXPORT_SYMBOL_GPL(sock_diag_unregister); static const inline struct sock_diag_handler *sock_diag_lock_handler(int family) { if (sock_diag_handlers[family] == NULL) request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, NETLINK_SOCK_DIAG, family); mutex_lock(&sock_diag_table_mutex); return sock_diag_handlers[family]; } static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h) { mutex_unlock(&sock_diag_table_mutex); } static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { int err; struct sock_diag_req *req = nlmsg_data(nlh); const struct sock_diag_handler *hndl; if (nlmsg_len(nlh) < sizeof(*req)) return -EINVAL; if (req->sdiag_family >= AF_MAX) return -EINVAL; hndl = sock_diag_lock_handler(req->sdiag_family); if (hndl == NULL) err = -ENOENT; else err = hndl->dump(skb, nlh); sock_diag_unlock_handler(hndl); return err; } static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) { int ret; switch (nlh->nlmsg_type) { case TCPDIAG_GETSOCK: case DCCPDIAG_GETSOCK: if (inet_rcv_compat == NULL) request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, NETLINK_SOCK_DIAG, AF_INET); mutex_lock(&sock_diag_table_mutex); if (inet_rcv_compat != NULL) ret = inet_rcv_compat(skb, nlh); else ret = -EOPNOTSUPP; mutex_unlock(&sock_diag_table_mutex); return ret; case SOCK_DIAG_BY_FAMILY: return __sock_diag_rcv_msg(skb, nlh); 
default: return -EINVAL; } } static DEFINE_MUTEX(sock_diag_mutex); static void sock_diag_rcv(struct sk_buff *skb) { mutex_lock(&sock_diag_mutex); netlink_rcv_skb(skb, &sock_diag_rcv_msg); mutex_unlock(&sock_diag_mutex); } static int __net_init diag_net_init(struct net *net) { struct netlink_kernel_cfg cfg = { .input = sock_diag_rcv, }; net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG, &cfg); return net->diag_nlsk == NULL ? -ENOMEM : 0; } static void __net_exit diag_net_exit(struct net *net) { netlink_kernel_release(net->diag_nlsk); net->diag_nlsk = NULL; } static struct pernet_operations diag_net_ops = { .init = diag_net_init, .exit = diag_net_exit, }; static int __init sock_diag_init(void) { return register_pernet_subsys(&diag_net_ops); } static void __exit sock_diag_exit(void) { unregister_pernet_subsys(&diag_net_ops); } module_init(sock_diag_init); module_exit(sock_diag_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_SOCK_DIAG);
./CrossVul/dataset_final_sorted/CWE-20/c/good_5586_0
crossvul-cpp_data_good_5660_0
/* * IPv6 output functions * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on linux/net/ipv4/ip_output.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * A.N.Kuznetsov : airthmetics in fragmentation. * extension headers are implemented. * route changes now work. * ip6_forward does not confuse sniffers. * etc. * * H. von Brand : Added missing #include <linux/string.h> * Imran Patel : frag id should be in NBO * Kazunori MIYAZAWA @USAGI * : add ip6_append_data and related functions * for datagram xmit */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/tcp.h> #include <linux/route.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/ndisc.h> #include <net/protocol.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/rawv6.h> #include <net/icmp.h> #include <net/xfrm.h> #include <net/checksum.h> #include <linux/mroute6.h> int __ip6_local_out(struct sk_buff *skb) { int len; len = skb->len - sizeof(struct ipv6hdr); if (len > IPV6_MAXPLEN) len = 0; ipv6_hdr(skb)->payload_len = htons(len); return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, dst_output); } int ip6_local_out(struct sk_buff *skb) { int err; err = __ip6_local_out(skb); if (likely(err == 1)) err = dst_output(skb); return err; } EXPORT_SYMBOL_GPL(ip6_local_out); static int ip6_finish_output2(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct net_device *dev = dst->dev; struct neighbour *neigh; 
struct in6_addr *nexthop; int ret; skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && ((mroute6_socket(dev_net(dev), skb) && !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->saddr))) { struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); /* Do not check for IFF_ALLMULTI; multicast routing is not supported in any case. */ if (newskb) NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, newskb, NULL, newskb->dev, dev_loopback_xmit); if (ipv6_hdr(skb)->hop_limit == 0) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } } IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST, skb->len); if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <= IPV6_ADDR_SCOPE_NODELOCAL && !(dev->flags & IFF_LOOPBACK)) { kfree_skb(skb); return 0; } } rcu_read_lock_bh(); nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); if (!IS_ERR(neigh)) { ret = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); return ret; } rcu_read_unlock_bh(); IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); kfree_skb(skb); return -EINVAL; } static int ip6_finish_output(struct sk_buff *skb) { if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb))) return ip6_fragment(skb, ip6_finish_output2); else return ip6_finish_output2(skb); } int ip6_output(struct sk_buff *skb) { struct net_device *dev = skb_dst(skb)->dev; struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); if (unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return 0; } return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, 
NULL, dev, ip6_finish_output, !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } /* * xmit an sk_buff (used by TCP, SCTP and DCCP) */ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, struct ipv6_txoptions *opt, int tclass) { struct net *net = sock_net(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct in6_addr *first_hop = &fl6->daddr; struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr; u8 proto = fl6->flowi6_proto; int seg_len = skb->len; int hlimit = -1; u32 mtu; if (opt) { unsigned int head_room; /* First: exthdrs may take lots of space (~8K for now) MAX_HEADER is not enough. */ head_room = opt->opt_nflen + opt->opt_flen; seg_len += head_room; head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev); if (skb_headroom(skb) < head_room) { struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room); if (skb2 == NULL) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); return -ENOBUFS; } consume_skb(skb); skb = skb2; skb_set_owner_w(skb, sk); } if (opt->opt_flen) ipv6_push_frag_opts(skb, opt, &proto); if (opt->opt_nflen) ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop); } skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); hdr = ipv6_hdr(skb); /* * Fill in the IPv6 header */ if (np) hlimit = np->hop_limit; if (hlimit < 0) hlimit = ip6_dst_hoplimit(dst); ip6_flow_hdr(hdr, tclass, fl6->flowlabel); hdr->payload_len = htons(seg_len); hdr->nexthdr = proto; hdr->hop_limit = hlimit; hdr->saddr = fl6->saddr; hdr->daddr = *first_hop; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; mtu = dst_mtu(dst); if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUT, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev, dst_output); } skb->dev = dst->dev; ipv6_local_error(sk, EMSGSIZE, fl6, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return 
-EMSGSIZE; } EXPORT_SYMBOL(ip6_xmit); static int ip6_call_ra_chain(struct sk_buff *skb, int sel) { struct ip6_ra_chain *ra; struct sock *last = NULL; read_lock(&ip6_ra_lock); for (ra = ip6_ra_chain; ra; ra = ra->next) { struct sock *sk = ra->sk; if (sk && ra->sel == sel && (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == skb->dev->ifindex)) { if (last) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) rawv6_rcv(last, skb2); } last = sk; } } if (last) { rawv6_rcv(last, skb); read_unlock(&ip6_ra_lock); return 1; } read_unlock(&ip6_ra_lock); return 0; } static int ip6_forward_proxy_check(struct sk_buff *skb) { struct ipv6hdr *hdr = ipv6_hdr(skb); u8 nexthdr = hdr->nexthdr; __be16 frag_off; int offset; if (ipv6_ext_hdr(nexthdr)) { offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off); if (offset < 0) return 0; } else offset = sizeof(struct ipv6hdr); if (nexthdr == IPPROTO_ICMPV6) { struct icmp6hdr *icmp6; if (!pskb_may_pull(skb, (skb_network_header(skb) + offset + 1 - skb->data))) return 0; icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset); switch (icmp6->icmp6_type) { case NDISC_ROUTER_SOLICITATION: case NDISC_ROUTER_ADVERTISEMENT: case NDISC_NEIGHBOUR_SOLICITATION: case NDISC_NEIGHBOUR_ADVERTISEMENT: case NDISC_REDIRECT: /* For reaction involving unicast neighbor discovery * message destined to the proxied address, pass it to * input function. */ return 1; default: break; } } /* * The proxying router can't forward traffic sent to a link-local * address, so signal the sender and discard the packet. This * behavior is clarified by the MIPv6 specification. 
*/ if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) { dst_link_failure(skb); return -1; } return 0; } static inline int ip6_forward_finish(struct sk_buff *skb) { return dst_output(skb); } int ip6_forward(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct ipv6hdr *hdr = ipv6_hdr(skb); struct inet6_skb_parm *opt = IP6CB(skb); struct net *net = dev_net(dst->dev); u32 mtu; if (net->ipv6.devconf_all->forwarding == 0) goto error; if (skb_warn_if_lro(skb)) goto drop; if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } if (skb->pkt_type != PACKET_HOST) goto drop; skb_forward_csum(skb); /* * We DO NOT make any processing on * RA packets, pushing them to user level AS IS * without ane WARRANTY that application will be able * to interpret them. The reason is that we * cannot make anything clever here. * * We are not end-node, so that if packet contains * AH/ESP, we cannot make anything. * Defragmentation also would be mistake, RA packets * cannot be fragmented, because there is no warranty * that different fragments will go along one path. --ANK */ if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) { if (ip6_call_ra_chain(skb, ntohs(opt->ra))) return 0; } /* * check and decrement ttl */ if (hdr->hop_limit <= 1) { /* Force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS); kfree_skb(skb); return -ETIMEDOUT; } /* XXX: idev->cnf.proxy_ndp? 
*/ if (net->ipv6.devconf_all->proxy_ndp && pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { int proxied = ip6_forward_proxy_check(skb); if (proxied > 0) return ip6_input(skb); else if (proxied < 0) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } } if (!xfrm6_route_forward(skb)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS); goto drop; } dst = skb_dst(skb); /* IPv6 specs say nothing about it, but it is clear that we cannot send redirects to source routed frames. We don't send redirects to frames decapsulated from IPsec. */ if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) { struct in6_addr *target = NULL; struct inet_peer *peer; struct rt6_info *rt; /* * incoming and outgoing devices are the same * send a redirect. */ rt = (struct rt6_info *) dst; if (rt->rt6i_flags & RTF_GATEWAY) target = &rt->rt6i_gateway; else target = &hdr->daddr; peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1); /* Limit redirects both by destination (here) and by source (inside ndisc_send_redirect) */ if (inet_peer_xrlim_allow(peer, 1*HZ)) ndisc_send_redirect(skb, target); if (peer) inet_putpeer(peer); } else { int addrtype = ipv6_addr_type(&hdr->saddr); /* This check is security critical. 
*/ if (addrtype == IPV6_ADDR_ANY || addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK)) goto error; if (addrtype & IPV6_ADDR_LINKLOCAL) { icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOT_NEIGHBOUR, 0); goto error; } } mtu = dst_mtu(dst); if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) || (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { /* Again, force OUTPUT device used as source address */ skb->dev = dst->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS); IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (skb_cow(skb, dst->dev->hard_header_len)) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS); goto drop; } hdr = ipv6_hdr(skb); /* Mangling hops number delayed to point after skb COW */ hdr->hop_limit--; IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, ip6_forward_finish); error: IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS); drop: kfree_skb(skb); return -EINVAL; } static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) { to->pkt_type = from->pkt_type; to->priority = from->priority; to->protocol = from->protocol; skb_dst_drop(to); skb_dst_set(to, dst_clone(skb_dst(from))); to->dev = from->dev; to->mark = from->mark; #ifdef CONFIG_NET_SCHED to->tc_index = from->tc_index; #endif nf_copy(to, from); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) to->nf_trace = from->nf_trace; #endif skb_copy_secmark(to, from); } int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) { struct sk_buff *frag; struct rt6_info *rt = (struct rt6_info*)skb_dst(skb); struct ipv6_pinfo *np = skb->sk ? 
inet6_sk(skb->sk) : NULL; struct ipv6hdr *tmp_hdr; struct frag_hdr *fh; unsigned int mtu, hlen, left, len; int hroom, troom; __be32 frag_id = 0; int ptr, offset = 0, err=0; u8 *prevhdr, nexthdr = 0; struct net *net = dev_net(skb_dst(skb)->dev); hlen = ip6_find_1stfragopt(skb, &prevhdr); nexthdr = *prevhdr; mtu = ip6_skb_dst_mtu(skb); /* We must not fragment if the socket is set to force MTU discovery * or if the skb it not generated by a local socket. */ if (unlikely(!skb->local_df && skb->len > mtu) || (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { if (skb->sk && dst_allfrag(skb_dst(skb))) sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); skb->dev = skb_dst(skb)->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return -EMSGSIZE; } if (np && np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } mtu -= hlen + sizeof(struct frag_hdr); if (skb_has_frag_list(skb)) { int first_len = skb_pagelen(skb); struct sk_buff *frag2; if (first_len - hlen > mtu || ((first_len - hlen) & 7) || skb_cloned(skb)) goto slow_path; skb_walk_frags(skb, frag) { /* Correct geometry. */ if (frag->len > mtu || ((frag->len & 7) && frag->next) || skb_headroom(frag) < hlen) goto slow_path_clean; /* Partially cloned skb? 
*/ if (skb_shared(frag)) goto slow_path_clean; BUG_ON(frag->sk); if (skb->sk) { frag->sk = skb->sk; frag->destructor = sock_wfree; } skb->truesize -= frag->truesize; } err = 0; offset = 0; frag = skb_shinfo(skb)->frag_list; skb_frag_list_init(skb); /* BUILD HEADER */ *prevhdr = NEXTHDR_FRAGMENT; tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); if (!tmp_hdr) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); return -ENOMEM; } __skb_pull(skb, hlen); fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); __skb_push(skb, hlen); skb_reset_network_header(skb); memcpy(skb_network_header(skb), tmp_hdr, hlen); ipv6_select_ident(fh, rt); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(IP6_MF); frag_id = fh->identification; first_len = skb_pagelen(skb); skb->data_len = first_len - skb_headlen(skb); skb->len = first_len; ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr)); dst_hold(&rt->dst); for (;;) { /* Prepare header of the next frame, * before previous one went down. 
*/ if (frag) { frag->ip_summed = CHECKSUM_NONE; skb_reset_transport_header(frag); fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); __skb_push(frag, hlen); skb_reset_network_header(frag); memcpy(skb_network_header(frag), tmp_hdr, hlen); offset += skb->len - hlen - sizeof(struct frag_hdr); fh->nexthdr = nexthdr; fh->reserved = 0; fh->frag_off = htons(offset); if (frag->next != NULL) fh->frag_off |= htons(IP6_MF); fh->identification = frag_id; ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ip6_copy_metadata(frag, skb); } err = output(skb); if(!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGCREATES); if (err || !frag) break; skb = frag; frag = skb->next; skb->next = NULL; } kfree(tmp_hdr); if (err == 0) { IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGOKS); ip6_rt_put(rt); return 0; } while (frag) { skb = frag->next; kfree_skb(frag); frag = skb; } IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), IPSTATS_MIB_FRAGFAILS); ip6_rt_put(rt); return err; slow_path_clean: skb_walk_frags(skb, frag2) { if (frag2 == frag) break; frag2->sk = NULL; frag2->destructor = NULL; skb->truesize += frag2->truesize; } } slow_path: if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb)) goto fail; left = skb->len - hlen; /* Space per frame */ ptr = hlen; /* Where to start from */ /* * Fragment the datagram. */ *prevhdr = NEXTHDR_FRAGMENT; hroom = LL_RESERVED_SPACE(rt->dst.dev); troom = rt->dst.dev->needed_tailroom; /* * Keep copying data until we run out. */ while(left > 0) { len = left; /* IF: it doesn't fit, use 'mtu' - the data space left */ if (len > mtu) len = mtu; /* IF: we are not sending up to and including the packet end then align the next start on an eight byte boundary */ if (len < left) { len &= ~7; } /* * Allocate buffer. 
*/ if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + hroom + troom, GFP_ATOMIC)) == NULL) { NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; goto fail; } /* * Set up data on packet */ ip6_copy_metadata(frag, skb); skb_reserve(frag, hroom); skb_put(frag, len + hlen + sizeof(struct frag_hdr)); skb_reset_network_header(frag); fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); frag->transport_header = (frag->network_header + hlen + sizeof(struct frag_hdr)); /* * Charge the memory for the fragment to any owner * it might possess */ if (skb->sk) skb_set_owner_w(frag, skb->sk); /* * Copy the packet header into the new buffer. */ skb_copy_from_linear_data(skb, skb_network_header(frag), hlen); /* * Build fragment header. */ fh->nexthdr = nexthdr; fh->reserved = 0; if (!frag_id) { ipv6_select_ident(fh, rt); frag_id = fh->identification; } else fh->identification = frag_id; /* * Copy a block of the IP datagram. */ if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len)) BUG(); left -= len; fh->frag_off = htons(offset); if (left > 0) fh->frag_off |= htons(IP6_MF); ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr)); ptr += len; offset += len; /* * Put this fragment into the sending queue. 
*/ err = output(frag); if (err) goto fail; IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGCREATES); } IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGOKS); consume_skb(skb); return err; fail: IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); kfree_skb(skb); return err; } static inline int ip6_rt_check(const struct rt6key *rt_key, const struct in6_addr *fl_addr, const struct in6_addr *addr_cache) { return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) && (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)); } static struct dst_entry *ip6_sk_dst_check(struct sock *sk, struct dst_entry *dst, const struct flowi6 *fl6) { struct ipv6_pinfo *np = inet6_sk(sk); struct rt6_info *rt; if (!dst) goto out; if (dst->ops->family != AF_INET6) { dst_release(dst); return NULL; } rt = (struct rt6_info *)dst; /* Yes, checking route validity in not connected * case is not very simple. Take into account, * that we do not support routing by source, TOS, * and MSG_DONTROUTE --ANK (980726) * * 1. ip6_rt_check(): If route was host route, * check that cached destination is current. * If it is network route, we still may * check its validity using saved pointer * to the last used address: daddr_cache. * We do not want to save whole address now, * (because main consumer of this service * is tcp, which has not this problem), * so that the last trick works only on connected * sockets. * 2. oif also should be the same. 
*/ if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) || #ifdef CONFIG_IPV6_SUBTREES ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) || #endif (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) { dst_release(dst); dst = NULL; } out: return dst; } static int ip6_dst_lookup_tail(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6) { struct net *net = sock_net(sk); #ifdef CONFIG_IPV6_OPTIMISTIC_DAD struct neighbour *n; struct rt6_info *rt; #endif int err; if (*dst == NULL) *dst = ip6_route_output(net, sk, fl6); if ((err = (*dst)->error)) goto out_err_release; if (ipv6_addr_any(&fl6->saddr)) { struct rt6_info *rt = (struct rt6_info *) *dst; err = ip6_route_get_saddr(net, rt, &fl6->daddr, sk ? inet6_sk(sk)->srcprefs : 0, &fl6->saddr); if (err) goto out_err_release; } #ifdef CONFIG_IPV6_OPTIMISTIC_DAD /* * Here if the dst entry we've looked up * has a neighbour entry that is in the INCOMPLETE * state and the src address from the flow is * marked as OPTIMISTIC, we release the found * dst entry and replace it instead with the * dst entry of the nexthop router */ rt = (struct rt6_info *) *dst; rcu_read_lock_bh(); n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr)); err = n && !(n->nud_state & NUD_VALID) ? 
-EINVAL : 0; rcu_read_unlock_bh(); if (err) { struct inet6_ifaddr *ifp; struct flowi6 fl_gw6; int redirect; ifp = ipv6_get_ifaddr(net, &fl6->saddr, (*dst)->dev, 1); redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC); if (ifp) in6_ifa_put(ifp); if (redirect) { /* * We need to get the dst entry for the * default router instead */ dst_release(*dst); memcpy(&fl_gw6, fl6, sizeof(struct flowi6)); memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr)); *dst = ip6_route_output(net, sk, &fl_gw6); if ((err = (*dst)->error)) goto out_err_release; } } #endif return 0; out_err_release: if (err == -ENETUNREACH) IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES); dst_release(*dst); *dst = NULL; return err; } /** * ip6_dst_lookup - perform route lookup on flow * @sk: socket which provides route info * @dst: pointer to dst_entry * for result * @fl6: flow to lookup * * This function performs a route lookup on the given flow. * * It returns zero on success, or a standard errno code on error. */ int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6) { *dst = NULL; return ip6_dst_lookup_tail(sk, dst, fl6); } EXPORT_SYMBOL_GPL(ip6_dst_lookup); /** * ip6_dst_lookup_flow - perform route lookup on flow with ipsec * @sk: socket which provides route info * @fl6: flow to lookup * @final_dst: final destination address for ipsec lookup * @can_sleep: we are in a sleepable context * * This function performs a route lookup on the given flow. * * It returns a valid dst pointer on success, or a pointer encoded * error code. 
*/ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool can_sleep) { struct dst_entry *dst = NULL; int err; err = ip6_dst_lookup_tail(sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) fl6->daddr = *final_dst; if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow); /** * ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow * @sk: socket which provides the dst cache and route info * @fl6: flow to lookup * @final_dst: final destination address for ipsec lookup * @can_sleep: we are in a sleepable context * * This function performs a route lookup on the given flow with the * possibility of using the cached route in the socket if it is valid. * It will take the socket dst lock when operating on the dst cache. * As a result, this function can only be used in process context. * * It returns a valid dst pointer on success, or a pointer encoded * error code. 
*/ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, const struct in6_addr *final_dst, bool can_sleep) { struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); int err; dst = ip6_sk_dst_check(sk, dst, fl6); err = ip6_dst_lookup_tail(sk, &dst, fl6); if (err) return ERR_PTR(err); if (final_dst) fl6->daddr = *final_dst; if (can_sleep) fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP; return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); } EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); static inline int ip6_ufo_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, int transhdrlen, int mtu,unsigned int flags, struct rt6_info *rt) { struct sk_buff *skb; int err; /* There is support for UDP large send offload by network * device, so create one single skb packet containing complete * udp datagram */ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { skb = sock_alloc_send_skb(sk, hh_len + fragheaderlen + transhdrlen + 20, (flags & MSG_DONTWAIT), &err); if (skb == NULL) return err; /* reserve space for Hardware header */ skb_reserve(skb, hh_len); /* create space for UDP/IP header */ skb_put(skb,fragheaderlen + transhdrlen); /* initialize network header pointer */ skb_reset_network_header(skb); /* initialize protocol header pointer */ skb->transport_header = skb->network_header + fragheaderlen; skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; } err = skb_append_datato_frags(sk,skb, getfrag, from, (length - transhdrlen)); if (!err) { struct frag_hdr fhdr; /* Specify the length of each IPv6 datagram fragment. * It has to be a multiple of 8. 
*/ skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - sizeof(struct frag_hdr)) & ~7; skb_shinfo(skb)->gso_type = SKB_GSO_UDP; ipv6_select_ident(&fhdr, rt); skb_shinfo(skb)->ip6_frag_id = fhdr.identification; __skb_queue_tail(&sk->sk_write_queue, skb); return 0; } /* There is not enough support do UPD LSO, * so follow normal path */ kfree_skb(skb); return err; } static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, gfp_t gfp) { return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; } static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src, gfp_t gfp) { return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; } static void ip6_append_data_mtu(int *mtu, int *maxfraglen, unsigned int fragheaderlen, struct sk_buff *skb, struct rt6_info *rt) { if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { if (skb == NULL) { /* first fragment, reserve header_len */ *mtu = *mtu - rt->dst.header_len; } else { /* * this fragment is not first, the headers * space is regarded as data space. 
*/ *mtu = dst_mtu(rt->dst.path); } *maxfraglen = ((*mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); } } int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, int dontfrag) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct inet_cork *cork; struct sk_buff *skb, *skb_prev = NULL; unsigned int maxfraglen, fragheaderlen; int exthdrlen; int dst_exthdrlen; int hh_len; int mtu; int copy; int err; int offset = 0; __u8 tx_flags = 0; if (flags&MSG_PROBE) return 0; cork = &inet->cork.base; if (skb_queue_empty(&sk->sk_write_queue)) { /* * setup for corking */ if (opt) { if (WARN_ON(np->cork.opt)) return -EINVAL; np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation); if (unlikely(np->cork.opt == NULL)) return -ENOBUFS; np->cork.opt->tot_len = opt->tot_len; np->cork.opt->opt_flen = opt->opt_flen; np->cork.opt->opt_nflen = opt->opt_nflen; np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt, sk->sk_allocation); if (opt->dst0opt && !np->cork.opt->dst0opt) return -ENOBUFS; np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt, sk->sk_allocation); if (opt->dst1opt && !np->cork.opt->dst1opt) return -ENOBUFS; np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt, sk->sk_allocation); if (opt->hopopt && !np->cork.opt->hopopt) return -ENOBUFS; np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt, sk->sk_allocation); if (opt->srcrt && !np->cork.opt->srcrt) return -ENOBUFS; /* need source address above miyazawa*/ } dst_hold(&rt->dst); cork->dst = &rt->dst; inet->cork.fl.u.ip6 = *fl6; np->cork.hop_limit = hlimit; np->cork.tclass = tclass; if (rt->dst.flags & DST_XFRM_TUNNEL) mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? rt->dst.dev->mtu : dst_mtu(&rt->dst); else mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? 
rt->dst.dev->mtu : dst_mtu(rt->dst.path); if (np->frag_size < mtu) { if (np->frag_size) mtu = np->frag_size; } cork->fragsize = mtu; if (dst_allfrag(rt->dst.path)) cork->flags |= IPCORK_ALLFRAG; cork->length = 0; exthdrlen = (opt ? opt->opt_flen : 0); length += exthdrlen; transhdrlen += exthdrlen; dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len; } else { rt = (struct rt6_info *)cork->dst; fl6 = &inet->cork.fl.u.ip6; opt = np->cork.opt; transhdrlen = 0; exthdrlen = 0; dst_exthdrlen = 0; mtu = cork->fragsize; } hh_len = LL_RESERVED_SPACE(rt->dst.dev); fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + (opt ? opt->opt_nflen : 0); maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); return -EMSGSIZE; } } /* For UDP, check if TX timestamp is enabled */ if (sk->sk_type == SOCK_DGRAM) sock_tx_timestamp(sk, &tx_flags); /* * Let's try using as much space as possible. * Use MTU if total length of the message fits into the MTU. * Otherwise, we need to reserve fragment header and * fragment alignment (= 8-15 octects, in total). * * Note that we may need to "move" the data from the tail of * of the buffer to the new fragment when we split * the message. * * FIXME: It may be fragmented into multiple chunks * at once if non-fragmentable extension headers * are too large. 
* --yoshfuji */ cork->length += length; if (length > mtu) { int proto = sk->sk_protocol; if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); return -EMSGSIZE; } if (proto == IPPROTO_UDP && (rt->dst.dev->features & NETIF_F_UFO)) { err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len, fragheaderlen, transhdrlen, mtu, flags, rt); if (err) goto error; return 0; } } if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) goto alloc_new_skb; while (length > 0) { /* Check if the remaining data fits into current packet. */ copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len; if (copy < length) copy = maxfraglen - skb->len; if (copy <= 0) { char *data; unsigned int datalen; unsigned int fraglen; unsigned int fraggap; unsigned int alloclen; alloc_new_skb: /* There's no room in the current skb */ if (skb) fraggap = skb->len - maxfraglen; else fraggap = 0; /* update mtu and maxfraglen if necessary */ if (skb == NULL || skb_prev == NULL) ip6_append_data_mtu(&mtu, &maxfraglen, fragheaderlen, skb, rt); skb_prev = skb; /* * If remaining data exceeds the mtu, * we know we need more fragment(s). */ datalen = length + fraggap; if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen) datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len; if ((flags & MSG_MORE) && !(rt->dst.dev->features&NETIF_F_SG)) alloclen = mtu; else alloclen = datalen + fragheaderlen; alloclen += dst_exthdrlen; if (datalen != length + fraggap) { /* * this is not the last fragment, the trailer * space is regarded as data space. */ datalen += rt->dst.trailer_len; } alloclen += rt->dst.trailer_len; fraglen = datalen + fragheaderlen; /* * We just reserve space for fragment header. * Note: this may be overallocation if the message * (without MSG_MORE) fits into the MTU. 
*/ alloclen += sizeof(struct frag_hdr); if (transhdrlen) { skb = sock_alloc_send_skb(sk, alloclen + hh_len, (flags & MSG_DONTWAIT), &err); } else { skb = NULL; if (atomic_read(&sk->sk_wmem_alloc) <= 2 * sk->sk_sndbuf) skb = sock_wmalloc(sk, alloclen + hh_len, 1, sk->sk_allocation); if (unlikely(skb == NULL)) err = -ENOBUFS; else { /* Only the initial fragment * is time stamped. */ tx_flags = 0; } } if (skb == NULL) goto error; /* * Fill in the control structures */ skb->ip_summed = CHECKSUM_NONE; skb->csum = 0; /* reserve for fragmentation and ipsec header */ skb_reserve(skb, hh_len + sizeof(struct frag_hdr) + dst_exthdrlen); if (sk->sk_type == SOCK_DGRAM) skb_shinfo(skb)->tx_flags = tx_flags; /* * Find where to start putting bytes */ data = skb_put(skb, fraglen); skb_set_network_header(skb, exthdrlen); data += fragheaderlen; skb->transport_header = (skb->network_header + fragheaderlen); if (fraggap) { skb->csum = skb_copy_and_csum_bits( skb_prev, maxfraglen, data + transhdrlen, fraggap, 0); skb_prev->csum = csum_sub(skb_prev->csum, skb->csum); data += fraggap; pskb_trim_unique(skb_prev, maxfraglen); } copy = datalen - transhdrlen - fraggap; if (copy < 0) { err = -EINVAL; kfree_skb(skb); goto error; } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { err = -EFAULT; kfree_skb(skb); goto error; } offset += copy; length -= datalen - fraggap; transhdrlen = 0; exthdrlen = 0; dst_exthdrlen = 0; /* * Put the packet on the pending queue */ __skb_queue_tail(&sk->sk_write_queue, skb); continue; } if (copy > length) copy = length; if (!(rt->dst.dev->features&NETIF_F_SG)) { unsigned int off; off = skb->len; if (getfrag(from, skb_put(skb, copy), offset, copy, off, skb) < 0) { __skb_trim(skb, off); err = -EFAULT; goto error; } } else { int i = skb_shinfo(skb)->nr_frags; struct page_frag *pfrag = sk_page_frag(sk); err = -ENOMEM; if (!sk_page_frag_refill(sk, pfrag)) goto error; if (!skb_can_coalesce(skb, i, pfrag->page, pfrag->offset)) { err 
= -EMSGSIZE;
                if (i == MAX_SKB_FRAGS)
                    goto error;

                __skb_fill_page_desc(skb, i, pfrag->page,
                                     pfrag->offset, 0);
                skb_shinfo(skb)->nr_frags = ++i;
                get_page(pfrag->page);
            }
            copy = min_t(int, copy, pfrag->size - pfrag->offset);
            if (getfrag(from,
                        page_address(pfrag->page) + pfrag->offset,
                        offset, copy, skb->len, skb) < 0)
                goto error_efault;

            pfrag->offset += copy;
            skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
            skb->len += copy;
            skb->data_len += copy;
            skb->truesize += copy;
            /* account the appended bytes against the socket send buffer */
            atomic_add(copy, &sk->sk_wmem_alloc);
        }
        offset += copy;
        length -= copy;
    }

    return 0;

error_efault:
    err = -EFAULT;
error:
    /* undo the length accounting for the bytes that were never queued */
    cork->length -= length;
    IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
    return err;
}
EXPORT_SYMBOL_GPL(ip6_append_data);

/*
 * ip6_cork_release - release all per-socket cork state.
 *
 * Frees the cached IPv6 tx options (each sub-option buffer plus the
 * container), drops the cached dst reference (clearing IPCORK_ALLFRAG),
 * and wipes the cached flow information.  Called once the pending queue
 * has been pushed or flushed.
 */
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
    if (np->cork.opt) {
        kfree(np->cork.opt->dst0opt);
        kfree(np->cork.opt->dst1opt);
        kfree(np->cork.opt->hopopt);
        kfree(np->cork.opt->srcrt);
        kfree(np->cork.opt);
        np->cork.opt = NULL;
    }

    if (inet->cork.base.dst) {
        dst_release(inet->cork.base.dst);
        inet->cork.base.dst = NULL;
        inet->cork.base.flags &= ~IPCORK_ALLFRAG;
    }
    memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}

/*
 * ip6_push_pending_frames - build and transmit the queued corked data.
 *
 * Dequeues every skb accumulated on sk_write_queue by ip6_append_data(),
 * chains the followers onto the first skb's frag_list, prepends any
 * extension headers and the IPv6 header using the cached cork state,
 * then hands the finished packet to ip6_local_out().
 *
 * Returns 0 (or a net_xmit errno) and always releases the cork state,
 * on both the success and the error path.
 */
int ip6_push_pending_frames(struct sock *sk)
{
    struct sk_buff *skb, *tmp_skb;
    struct sk_buff **tail_skb;
    struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
    struct inet_sock *inet = inet_sk(sk);
    struct ipv6_pinfo *np = inet6_sk(sk);
    struct net *net = sock_net(sk);
    struct ipv6hdr *hdr;
    struct ipv6_txoptions *opt = np->cork.opt;
    struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
    struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
    unsigned char proto = fl6->flowi6_proto;
    int err = 0;

    if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
        goto out;
    tail_skb = &(skb_shinfo(skb)->frag_list);

    /* move skb->data to ip header from ext header */
    if (skb->data < skb_network_header(skb))
        __skb_pull(skb, skb_network_offset(skb));
    /* absorb every remaining queued skb into the head skb's frag_list */
    while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
        __skb_pull(tmp_skb, skb_network_header_len(skb));
        *tail_skb = tmp_skb;
        tail_skb = &(tmp_skb->next);
        skb->len += tmp_skb->len;
        skb->data_len += tmp_skb->len;
        skb->truesize += tmp_skb->truesize;
        /* ownership moved to the head skb */
        tmp_skb->destructor = NULL;
        tmp_skb->sk = NULL;
    }

    /* Allow local fragmentation. */
    if (np->pmtudisc < IPV6_PMTUDISC_DO)
        skb->local_df = 1;

    *final_dst = fl6->daddr;
    __skb_pull(skb, skb_network_header_len(skb));
    /* push extension headers; routing options may rewrite final_dst */
    if (opt && opt->opt_flen)
        ipv6_push_frag_opts(skb, opt, &proto);
    if (opt && opt->opt_nflen)
        ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

    skb_push(skb, sizeof(struct ipv6hdr));
    skb_reset_network_header(skb);
    hdr = ipv6_hdr(skb);

    ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
    hdr->hop_limit = np->cork.hop_limit;
    hdr->nexthdr = proto;
    hdr->saddr = fl6->saddr;
    hdr->daddr = *final_dst;

    skb->priority = sk->sk_priority;
    skb->mark = sk->sk_mark;

    skb_dst_set(skb, dst_clone(&rt->dst));
    IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
    if (proto == IPPROTO_ICMPV6) {
        struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

        ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
        ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
    }

    err = ip6_local_out(skb);
    if (err) {
        if (err > 0)
            err = net_xmit_errno(err);
        if (err)
            goto error;
    }

out:
    ip6_cork_release(inet, np);
    return err;
error:
    IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
    goto out;
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);

/*
 * ip6_flush_pending_frames - discard all queued corked data.
 *
 * Drops every skb still sitting on sk_write_queue (bumping the
 * OUTDISCARDS counter for those that already carry a dst) and then
 * releases the cork state.
 */
void ip6_flush_pending_frames(struct sock *sk)
{
    struct sk_buff *skb;

    while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
        if (skb_dst(skb))
            IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
                          IPSTATS_MIB_OUTDISCARDS);
        kfree_skb(skb);
    }

    ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
./CrossVul/dataset_final_sorted/CWE-20/c/good_5660_0
crossvul-cpp_data_good_3070_2
/*
 * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include "../ssl_locl.h"
#include "internal/constant_time_locl.h"
#include <openssl/rand.h>
#include "record_locl.h"

/* 48 bytes of 0x36: the inner pad of the SSLv3 MAC construction */
static const unsigned char ssl3_pad_1[48] = {
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};

/* 48 bytes of 0x5c: the outer pad of the SSLv3 MAC construction */
static const unsigned char ssl3_pad_2[48] = {
    0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c,
    0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c,
    0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c,
    0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c,
    0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c,
    0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c
};

/*
 * Clear the contents of an SSL3_RECORD but retain any memory allocated.
 * The |comp| decompression buffer pointer is saved across the memset and
 * restored so the allocation can be reused for subsequent records.
 */
void SSL3_RECORD_clear(SSL3_RECORD *r, unsigned int num_recs)
{
    unsigned char *comp;
    unsigned int i;

    for (i = 0; i < num_recs; i++) {
        comp = r[i].comp;

        memset(&r[i], 0, sizeof(*r));
        r[i].comp = comp;
    }
}

/*
 * Free the decompression buffers held by |num_recs| records and NULL the
 * pointers so a later release/clear is harmless.
 */
void SSL3_RECORD_release(SSL3_RECORD *r, unsigned int num_recs)
{
    unsigned int i;

    for (i = 0; i < num_recs; i++) {
        OPENSSL_free(r[i].comp);
        r[i].comp = NULL;
    }
}

/* Copy an 8-byte (SEQ_NUM_SIZE) record sequence number into |r|. */
void SSL3_RECORD_set_seq_num(SSL3_RECORD *r, const unsigned char *seq_num)
{
    memcpy(r->seq_num, seq_num, SEQ_NUM_SIZE);
}

/*
 * Peeks ahead into "read_ahead" data to see if we have a whole record waiting
 * for us in the buffer.
 */
static int ssl3_record_app_data_waiting(SSL *s)
{
    SSL3_BUFFER *rbuf;
    int left, len;
    unsigned char *p;

    rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);

    p = SSL3_BUFFER_get_buf(rbuf);
    if (p == NULL)
        return 0;

    left = SSL3_BUFFER_get_left(rbuf);

    /* need at least a full record header to look at */
    if (left < SSL3_RT_HEADER_LENGTH)
        return 0;

    p += SSL3_BUFFER_get_offset(rbuf);

    /*
     * We only check the type and record length, we will sanity check version
     * etc later
     */
    if (*p != SSL3_RT_APPLICATION_DATA)
        return 0;

    p += 3;
    n2s(p, len);

    /* header present but record body not yet fully buffered */
    if (left < SSL3_RT_HEADER_LENGTH + len)
        return 0;

    return 1;
}

/*
 * MAX_EMPTY_RECORDS defines the number of consecutive, empty records that
 * will be processed per call to ssl3_get_record. Without this limit an
 * attacker could send empty records at a faster rate than we can process and
 * cause ssl3_get_record to loop forever.
 */
#define MAX_EMPTY_RECORDS 32

#define SSL2_RT_HEADER_LENGTH 2
/*-
 * Call this to get new input records.
 * It will return <= 0 if more data is needed, normally due to an error
 * or non-blocking IO.
 * When it finishes, |numrpipes| records have been decoded. For each record 'i':
 * rr[i].type    - is the type of record
 * rr[i].data,   - data
 * rr[i].length, - number of bytes
 * Multiple records will only be returned if the record types are all
 * SSL3_RT_APPLICATION_DATA.
 The number of records returned will always be <=
 * |max_pipelines|
 */
/* used only by ssl3_read_bytes */
int ssl3_get_record(SSL *s)
{
    int ssl_major, ssl_minor, al;
    int enc_err, n, i, ret = -1;
    SSL3_RECORD *rr;
    SSL3_BUFFER *rbuf;
    SSL_SESSION *sess;
    unsigned char *p;
    unsigned char md[EVP_MAX_MD_SIZE];
    short version;
    unsigned mac_size;
    unsigned int num_recs = 0;
    unsigned int max_recs;
    unsigned int j;

    rr = RECORD_LAYER_get_rrec(&s->rlayer);
    rbuf = RECORD_LAYER_get_rbuf(&s->rlayer);
    max_recs = s->max_pipelines;
    if (max_recs == 0)
        max_recs = 1;
    sess = s->session;

    do {
        /* check if we have the header */
        if ((RECORD_LAYER_get_rstate(&s->rlayer) != SSL_ST_READ_BODY) ||
            (RECORD_LAYER_get_packet_length(&s->rlayer)
             < SSL3_RT_HEADER_LENGTH)) {
            n = ssl3_read_n(s, SSL3_RT_HEADER_LENGTH,
                            SSL3_BUFFER_get_len(rbuf), 0,
                            num_recs == 0 ? 1 : 0);
            if (n <= 0)
                return (n);     /* error or non-blocking */
            RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_BODY);

            p = RECORD_LAYER_get_packet(&s->rlayer);

            /*
             * The first record received by the server may be a
             * V2ClientHello.
             */
            if (s->server && RECORD_LAYER_is_first_record(&s->rlayer)
                && (p[0] & 0x80) && (p[2] == SSL2_MT_CLIENT_HELLO)) {
                /*
                 *  SSLv2 style record
                 *
                 * |num_recs| here will actually always be 0 because
                 * |num_recs > 0| only ever occurs when we are processing
                 * multiple app data records - which we know isn't the case
                 * here because it is an SSLv2ClientHello. We keep it using
                 * |num_recs| for the sake of consistency
                 */
                rr[num_recs].type = SSL3_RT_HANDSHAKE;
                rr[num_recs].rec_version = SSL2_VERSION;

                rr[num_recs].length = ((p[0] & 0x7f) << 8) | p[1];

                if (rr[num_recs].length > SSL3_BUFFER_get_len(rbuf)
                    - SSL2_RT_HEADER_LENGTH) {
                    al = SSL_AD_RECORD_OVERFLOW;
                    SSLerr(SSL_F_SSL3_GET_RECORD,
                           SSL_R_PACKET_LENGTH_TOO_LONG);
                    goto f_err;
                }

                if (rr[num_recs].length < MIN_SSL2_RECORD_LEN) {
                    al = SSL_AD_HANDSHAKE_FAILURE;
                    SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_LENGTH_TOO_SHORT);
                    goto f_err;
                }
            } else {
                /* SSLv3+ style record */
                if (s->msg_callback)
                    s->msg_callback(0, 0, SSL3_RT_HEADER, p, 5, s,
                                    s->msg_callback_arg);

                /* Pull apart the header into the SSL3_RECORD */
                rr[num_recs].type = *(p++);
                ssl_major = *(p++);
                ssl_minor = *(p++);
                version = (ssl_major << 8) | ssl_minor;
                rr[num_recs].rec_version = version;
                n2s(p, rr[num_recs].length);

                /* Lets check version */
                if (!s->first_packet && version != s->version) {
                    SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_WRONG_VERSION_NUMBER);
                    if ((s->version & 0xFF00) == (version & 0xFF00)
                        && !s->enc_write_ctx && !s->write_hash) {
                        if (rr->type == SSL3_RT_ALERT) {
                            /*
                             * The record is using an incorrect version number,
                             * but what we've got appears to be an alert. We
                             * haven't read the body yet to check whether its a
                             * fatal or not - but chances are it is. We
                             * probably shouldn't send a fatal alert back. We'll
                             * just end.
                             */
                            goto err;
                        }
                        /*
                         * Send back error using their minor version number :-)
                         */
                        s->version = (unsigned short)version;
                    }
                    al = SSL_AD_PROTOCOL_VERSION;
                    goto f_err;
                }

                if ((version >> 8) != SSL3_VERSION_MAJOR) {
                    if (RECORD_LAYER_is_first_record(&s->rlayer)) {
                        /* Go back to start of packet, look at the five bytes
                         * that we have. */
                        p = RECORD_LAYER_get_packet(&s->rlayer);
                        if (strncmp((char *)p, "GET ", 4) == 0 ||
                            strncmp((char *)p, "POST ", 5) == 0 ||
                            strncmp((char *)p, "HEAD ", 5) == 0 ||
                            strncmp((char *)p, "PUT ", 4) == 0) {
                            SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_HTTP_REQUEST);
                            goto err;
                        } else if (strncmp((char *)p, "CONNE", 5) == 0) {
                            SSLerr(SSL_F_SSL3_GET_RECORD,
                                   SSL_R_HTTPS_PROXY_REQUEST);
                            goto err;
                        }

                        /* Doesn't look like TLS - don't send an alert */
                        SSLerr(SSL_F_SSL3_GET_RECORD,
                               SSL_R_WRONG_VERSION_NUMBER);
                        goto err;
                    } else {
                        SSLerr(SSL_F_SSL3_GET_RECORD,
                               SSL_R_WRONG_VERSION_NUMBER);
                        al = SSL_AD_PROTOCOL_VERSION;
                        goto f_err;
                    }
                }

                if (rr[num_recs].length >
                    SSL3_BUFFER_get_len(rbuf) - SSL3_RT_HEADER_LENGTH) {
                    al = SSL_AD_RECORD_OVERFLOW;
                    SSLerr(SSL_F_SSL3_GET_RECORD,
                           SSL_R_PACKET_LENGTH_TOO_LONG);
                    goto f_err;
                }
            }

            /* now s->rlayer.rstate == SSL_ST_READ_BODY */
        }

        /*
         * s->rlayer.rstate == SSL_ST_READ_BODY, get and decode the data.
         * Calculate how much more data we need to read for the rest of the
         * record
         */
        if (rr[num_recs].rec_version == SSL2_VERSION) {
            i = rr[num_recs].length + SSL2_RT_HEADER_LENGTH
                - SSL3_RT_HEADER_LENGTH;
        } else {
            i = rr[num_recs].length;
        }

        if (i > 0) {
            /* now s->packet_length == SSL3_RT_HEADER_LENGTH */

            n = ssl3_read_n(s, i, i, 1, 0);
            if (n <= 0)
                return (n);     /* error or non-blocking io */
        }

        /* set state for later operations */
        RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_HEADER);

        /*
         * At this point, s->packet_length == SSL3_RT_HEADER_LENGTH + rr->length,
         * or s->packet_length == SSL2_RT_HEADER_LENGTH + rr->length
         * and we have that many bytes in s->packet
         */
        if (rr[num_recs].rec_version == SSL2_VERSION) {
            rr[num_recs].input =
                &(RECORD_LAYER_get_packet(&s->rlayer)[SSL2_RT_HEADER_LENGTH]);
        } else {
            rr[num_recs].input =
                &(RECORD_LAYER_get_packet(&s->rlayer)[SSL3_RT_HEADER_LENGTH]);
        }

        /*
         * ok, we can now read from 's->packet' data into 'rr' rr->input
         * points at rr->length bytes, which need to be copied into rr->data
         * by either the decryption or by the decompression When the data is
         * 'copied' into the rr->data buffer, rr->input will be pointed at
         * the new buffer
         */

        /*
         * We now have - encrypted [ MAC [ compressed [ plain ] ] ]
         * rr->length bytes of encrypted compressed stuff.
         */

        /* check is not needed I believe */
        if (rr[num_recs].length > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
            al = SSL_AD_RECORD_OVERFLOW;
            SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_ENCRYPTED_LENGTH_TOO_LONG);
            goto f_err;
        }

        /* decrypt in place in 'rr->input' */
        rr[num_recs].data = rr[num_recs].input;
        rr[num_recs].orig_len = rr[num_recs].length;

        /* Mark this record as not read by upper layers yet */
        rr[num_recs].read = 0;

        num_recs++;

        /* we have pulled in a full packet so zero things */
        RECORD_LAYER_reset_packet_length(&s->rlayer);
        RECORD_LAYER_clear_first_record(&s->rlayer);
    } while (num_recs < max_recs
             && rr[num_recs - 1].type == SSL3_RT_APPLICATION_DATA
             && SSL_USE_EXPLICIT_IV(s)
             && s->enc_read_ctx != NULL
             && (EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_read_ctx))
                 & EVP_CIPH_FLAG_PIPELINE)
             && ssl3_record_app_data_waiting(s));

    /*
     * If in encrypt-then-mac mode calculate mac from encrypted record. All
     * the details below are public so no timing details can leak.
     */
    if (SSL_READ_ETM(s) && s->read_hash) {
        unsigned char *mac;
        mac_size = EVP_MD_CTX_size(s->read_hash);
        OPENSSL_assert(mac_size <= EVP_MAX_MD_SIZE);
        for (j = 0; j < num_recs; j++) {
            if (rr[j].length < mac_size) {
                al = SSL_AD_DECODE_ERROR;
                SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_LENGTH_TOO_SHORT);
                goto f_err;
            }
            rr[j].length -= mac_size;
            mac = rr[j].data + rr[j].length;
            i = s->method->ssl3_enc->mac(s, &rr[j], md, 0 /* not send */ );
            if (i < 0 || CRYPTO_memcmp(md, mac, (size_t)mac_size) != 0) {
                al = SSL_AD_BAD_RECORD_MAC;
                SSLerr(SSL_F_SSL3_GET_RECORD,
                       SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC);
                goto f_err;
            }
        }
    }

    enc_err = s->method->ssl3_enc->enc(s, rr, num_recs, 0);
    /*-
     * enc_err is:
     *    0: (in non-constant time) if the record is publically invalid.
     *    1: if the padding is valid
     *   -1: if the padding is invalid
     */
    if (enc_err == 0) {
        al = SSL_AD_DECRYPTION_FAILED;
        SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_BLOCK_CIPHER_PAD_IS_WRONG);
        goto f_err;
    }
#ifdef SSL_DEBUG
    printf("dec %d\n", rr->length);
    {
        unsigned int z;
        for (z = 0; z < rr->length; z++)
            printf("%02X%c", rr->data[z], ((z + 1) % 16) ? ' ' : '\n');
    }
    printf("\n");
#endif

    /* r->length is now the compressed data plus mac */
    if ((sess != NULL) &&
        (s->enc_read_ctx != NULL) &&
        (!SSL_READ_ETM(s) && EVP_MD_CTX_md(s->read_hash) != NULL)) {
        /* s->read_hash != NULL => mac_size != -1 */
        unsigned char *mac = NULL;
        unsigned char mac_tmp[EVP_MAX_MD_SIZE];

        mac_size = EVP_MD_CTX_size(s->read_hash);
        OPENSSL_assert(mac_size <= EVP_MAX_MD_SIZE);

        for (j = 0; j < num_recs; j++) {
            /*
             * orig_len is the length of the record before any padding was
             * removed. This is public information, as is the MAC in use,
             * therefore we can safely process the record in a different
             * amount of time if it's too short to possibly contain a MAC.
             */
            if (rr[j].orig_len < mac_size ||
                /* CBC records must have a padding length byte too. */
                (EVP_CIPHER_CTX_mode(s->enc_read_ctx) == EVP_CIPH_CBC_MODE &&
                 rr[j].orig_len < mac_size + 1)) {
                al = SSL_AD_DECODE_ERROR;
                SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_LENGTH_TOO_SHORT);
                goto f_err;
            }

            if (EVP_CIPHER_CTX_mode(s->enc_read_ctx) == EVP_CIPH_CBC_MODE) {
                /*
                 * We update the length so that the TLS header bytes can be
                 * constructed correctly but we need to extract the MAC in
                 * constant time from within the record, without leaking the
                 * contents of the padding bytes.
                 */
                mac = mac_tmp;
                ssl3_cbc_copy_mac(mac_tmp, &rr[j], mac_size);
                rr[j].length -= mac_size;
            } else {
                /*
                 * In this case there's no padding, so |rec->orig_len| equals
                 * |rec->length| and we checked that there's enough bytes for
                 * |mac_size| above.
                 */
                rr[j].length -= mac_size;
                mac = &rr[j].data[rr[j].length];
            }

            i = s->method->ssl3_enc->mac(s, &rr[j], md, 0 /* not send */ );
            if (i < 0 || mac == NULL
                || CRYPTO_memcmp(md, mac, (size_t)mac_size) != 0)
                enc_err = -1;
            /*
             * NOTE(review): this bound is checked against rr->length (i.e.
             * record 0) inside the per-record |j| loop rather than
             * rr[j].length -- confirm against upstream whether rr[j] was
             * intended here.
             */
            if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_size)
                enc_err = -1;
        }
    }

    if (enc_err < 0) {
        /*
         * A separate 'decryption_failed' alert was introduced with TLS 1.0,
         * SSL 3.0 only has 'bad_record_mac'. But unless a decryption
         * failure is directly visible from the ciphertext anyway, we should
         * not reveal which kind of error occurred -- this might become
         * visible to an attacker (e.g. via a logfile)
         */
        al = SSL_AD_BAD_RECORD_MAC;
        SSLerr(SSL_F_SSL3_GET_RECORD,
               SSL_R_DECRYPTION_FAILED_OR_BAD_RECORD_MAC);
        goto f_err;
    }

    for (j = 0; j < num_recs; j++) {
        /* rr[j].length is now just compressed */
        if (s->expand != NULL) {
            if (rr[j].length > SSL3_RT_MAX_COMPRESSED_LENGTH) {
                al = SSL_AD_RECORD_OVERFLOW;
                SSLerr(SSL_F_SSL3_GET_RECORD,
                       SSL_R_COMPRESSED_LENGTH_TOO_LONG);
                goto f_err;
            }
            if (!ssl3_do_uncompress(s, &rr[j])) {
                al = SSL_AD_DECOMPRESSION_FAILURE;
                SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_BAD_DECOMPRESSION);
                goto f_err;
            }
        }

        if (rr[j].length > SSL3_RT_MAX_PLAIN_LENGTH) {
            al = SSL_AD_RECORD_OVERFLOW;
            SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_DATA_LENGTH_TOO_LONG);
            goto f_err;
        }

        rr[j].off = 0;
        /*-
         * So at this point the following is true
         * rr[j].type   is the type of record
         * rr[j].length == number of bytes in record
         * rr[j].off    == offset to first valid byte
         * rr[j].data   == where to take bytes from, increment after use :-).
         */

        /* just read a 0 length packet */
        if (rr[j].length == 0) {
            /* bound consecutive empty records to avoid a DoS loop */
            RECORD_LAYER_inc_empty_record_count(&s->rlayer);
            if (RECORD_LAYER_get_empty_record_count(&s->rlayer)
                > MAX_EMPTY_RECORDS) {
                al = SSL_AD_UNEXPECTED_MESSAGE;
                SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_RECORD_TOO_SMALL);
                goto f_err;
            }
        } else {
            RECORD_LAYER_reset_empty_record_count(&s->rlayer);
        }
    }

    RECORD_LAYER_set_numrpipes(&s->rlayer, num_recs);
    return 1;

 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
 err:
    return ret;
}

/*
 * Decompress |rr| in place (lazily allocating the |comp| buffer on first
 * use). Returns 1 on success, 0 on allocation or decompression failure.
 * No-op (always 1) when OpenSSL is built without compression support.
 */
int ssl3_do_uncompress(SSL *ssl, SSL3_RECORD *rr)
{
#ifndef OPENSSL_NO_COMP
    int i;

    if (rr->comp == NULL) {
        rr->comp = (unsigned char *)
            OPENSSL_malloc(SSL3_RT_MAX_ENCRYPTED_LENGTH);
    }
    if (rr->comp == NULL)
        return 0;

    i = COMP_expand_block(ssl->expand, rr->comp,
                          SSL3_RT_MAX_PLAIN_LENGTH, rr->data, (int)rr->length);
    if (i < 0)
        return 0;
    else
        rr->length = i;
    rr->data = rr->comp;
#endif
    return 1;
}

/*
 * Compress |wr->input| into |wr->data| and repoint |wr->input| at the
 * compressed data. Returns 1 on success, 0 on failure; no-op (always 1)
 * without compression support.
 */
int ssl3_do_compress(SSL *ssl, SSL3_RECORD *wr)
{
#ifndef OPENSSL_NO_COMP
    int i;

    i = COMP_compress_block(ssl->compress, wr->data,
                            SSL3_RT_MAX_COMPRESSED_LENGTH,
                            wr->input, (int)wr->length);
    if (i < 0)
        return (0);
    else
        wr->length = i;

    wr->input = wr->data;
#endif
    return (1);
}

/*-
 * ssl3_enc encrypts/decrypts |n_recs| records in |inrecs|
 *
 * Returns:
 *    0: (in non-constant time) if the record is publically invalid (i.e. too
 *        short etc).
 *    1: if the record's padding is valid / the encryption was successful.
 *   -1: if the record's padding is invalid or, if sending, an internal error
 *       occurred.
 */
int ssl3_enc(SSL *s, SSL3_RECORD *inrecs, unsigned int n_recs, int send)
{
    SSL3_RECORD *rec;
    EVP_CIPHER_CTX *ds;
    unsigned long l;
    int bs, i, mac_size = 0;
    const EVP_CIPHER *enc;

    rec = inrecs;
    /*
     * We shouldn't ever be called with more than one record in the SSLv3
     * case
     */
    if (n_recs != 1)
        return 0;
    if (send) {
        ds = s->enc_write_ctx;
        if (s->enc_write_ctx == NULL)
            enc = NULL;
        else
            enc = EVP_CIPHER_CTX_cipher(s->enc_write_ctx);
    } else {
        ds = s->enc_read_ctx;
        if (s->enc_read_ctx == NULL)
            enc = NULL;
        else
            enc = EVP_CIPHER_CTX_cipher(s->enc_read_ctx);
    }

    /* no cipher set up yet: pass the plaintext through unchanged */
    if ((s->session == NULL) || (ds == NULL) || (enc == NULL)) {
        memmove(rec->data, rec->input, rec->length);
        rec->input = rec->data;
    } else {
        l = rec->length;
        bs = EVP_CIPHER_CTX_block_size(ds);

        /* COMPRESS */

        /* on send, pad the record out to a whole cipher block */
        if ((bs != 1) && send) {
            i = bs - ((int)l % bs);

            /* we need to add 'i-1' padding bytes */
            l += i;
            /*
             * the last of these zero bytes will be overwritten with the
             * padding length.
             */
            memset(&rec->input[rec->length], 0, i);
            rec->length += i;
            rec->input[l - 1] = (i - 1);
        }

        if (!send) {
            /* ciphertext must be a non-empty whole number of blocks */
            if (l == 0 || l % bs != 0)
                return 0;
            /* otherwise, rec->length >= bs */
        }

        if (EVP_Cipher(ds, rec->data, rec->input, l) < 1)
            return -1;

        if (EVP_MD_CTX_md(s->read_hash) != NULL)
            mac_size = EVP_MD_CTX_size(s->read_hash);
        /* on receive, strip and validate the CBC padding */
        if ((bs != 1) && !send)
            return ssl3_cbc_remove_padding(rec, bs, mac_size);
    }
    return (1);
}

/*-
 * tls1_enc encrypts/decrypts |n_recs| in |recs|.
 *
 * Returns:
 *    0: (in non-constant time) if the record is publically invalid (i.e. too
 *        short etc).
 *    1: if the record's padding is valid / the encryption was successful.
 *   -1: if the record's padding/AEAD-authenticator is invalid or, if sending,
 *       an internal error occurred.
 */
int tls1_enc(SSL *s, SSL3_RECORD *recs, unsigned int n_recs, int send)
{
    EVP_CIPHER_CTX *ds;
    size_t reclen[SSL_MAX_PIPELINES];
    unsigned char buf[SSL_MAX_PIPELINES][EVP_AEAD_TLS1_AAD_LEN];
    int bs, i, j, k, pad = 0, ret, mac_size = 0;
    const EVP_CIPHER *enc;
    unsigned int ctr;

    if (send) {
        if (EVP_MD_CTX_md(s->write_hash)) {
            int n = EVP_MD_CTX_size(s->write_hash);
            OPENSSL_assert(n >= 0);
        }
        ds = s->enc_write_ctx;
        if (s->enc_write_ctx == NULL)
            enc = NULL;
        else {
            int ivlen;
            enc = EVP_CIPHER_CTX_cipher(s->enc_write_ctx);
            /* For TLSv1.1 and later explicit IV */
            if (SSL_USE_EXPLICIT_IV(s)
                && EVP_CIPHER_mode(enc) == EVP_CIPH_CBC_MODE)
                ivlen = EVP_CIPHER_iv_length(enc);
            else
                ivlen = 0;
            if (ivlen > 1) {
                for (ctr = 0; ctr < n_recs; ctr++) {
                    if (recs[ctr].data != recs[ctr].input) {
                        /*
                         * we can't write into the input stream: Can this ever
                         * happen?? (steve)
                         */
                        SSLerr(SSL_F_TLS1_ENC, ERR_R_INTERNAL_ERROR);
                        return -1;
                    } else if (RAND_bytes(recs[ctr].input, ivlen) <= 0) {
                        SSLerr(SSL_F_TLS1_ENC, ERR_R_INTERNAL_ERROR);
                        return -1;
                    }
                }
            }
        }
    } else {
        if (EVP_MD_CTX_md(s->read_hash)) {
            int n = EVP_MD_CTX_size(s->read_hash);
            OPENSSL_assert(n >= 0);
        }
        ds = s->enc_read_ctx;
        if (s->enc_read_ctx == NULL)
            enc = NULL;
        else
            enc = EVP_CIPHER_CTX_cipher(s->enc_read_ctx);
    }

    /* no cipher configured yet: pass records through unchanged */
    if ((s->session == NULL) || (ds == NULL) || (enc == NULL)) {
        for (ctr = 0; ctr < n_recs; ctr++) {
            memmove(recs[ctr].data, recs[ctr].input, recs[ctr].length);
            recs[ctr].input = recs[ctr].data;
        }
        ret = 1;
    } else {
        bs = EVP_CIPHER_block_size(EVP_CIPHER_CTX_cipher(ds));

        if (n_recs > 1) {
            if (!(EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(ds))
                  & EVP_CIPH_FLAG_PIPELINE)) {
                /*
                 * We shouldn't have been called with pipeline data if the
                 * cipher doesn't support pipelining
                 */
                SSLerr(SSL_F_TLS1_ENC, SSL_R_PIPELINE_FAILURE);
                return -1;
            }
        }
        for (ctr = 0; ctr < n_recs; ctr++) {
            reclen[ctr] = recs[ctr].length;

            if (EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(ds))
                & EVP_CIPH_FLAG_AEAD_CIPHER) {
                unsigned char *seq;

                seq = send ? RECORD_LAYER_get_write_sequence(&s->rlayer)
                    : RECORD_LAYER_get_read_sequence(&s->rlayer);

                if (SSL_IS_DTLS(s)) {
                    /* DTLS does not support pipelining */
                    unsigned char dtlsseq[9], *p = dtlsseq;

                    s2n(send ? DTLS_RECORD_LAYER_get_w_epoch(&s->rlayer) :
                        DTLS_RECORD_LAYER_get_r_epoch(&s->rlayer), p);
                    memcpy(p, &seq[2], 6);
                    memcpy(buf[ctr], dtlsseq, 8);
                } else {
                    memcpy(buf[ctr], seq, 8);
                    for (i = 7; i >= 0; i--) { /* increment */
                        ++seq[i];
                        if (seq[i] != 0)
                            break;
                    }
                }

                /* assemble the 13-byte AAD: seq || type || version || len */
                buf[ctr][8] = recs[ctr].type;
                buf[ctr][9] = (unsigned char)(s->version >> 8);
                buf[ctr][10] = (unsigned char)(s->version);
                buf[ctr][11] = recs[ctr].length >> 8;
                buf[ctr][12] = recs[ctr].length & 0xff;
                pad = EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_AEAD_TLS1_AAD,
                                          EVP_AEAD_TLS1_AAD_LEN, buf[ctr]);
                if (pad <= 0)
                    return -1;

                if (send) {
                    reclen[ctr] += pad;
                    recs[ctr].length += pad;
                }

            } else if ((bs != 1) && send) {
                i = bs - ((int)reclen[ctr] % bs);

                /* Add weird padding of upto 256 bytes */

                /* we need to add 'i' padding bytes of value j */
                j = i - 1;
                for (k = (int)reclen[ctr]; k < (int)(reclen[ctr] + i); k++)
                    recs[ctr].input[k] = j;
                reclen[ctr] += i;
                recs[ctr].length += i;
            }

            if (!send) {
                /* ciphertext must be a non-empty whole number of blocks */
                if (reclen[ctr] == 0 || reclen[ctr] % bs != 0)
                    return 0;
            }
        }
        if (n_recs > 1) {
            unsigned char *data[SSL_MAX_PIPELINES];

            /* Set the output buffers */
            for (ctr = 0; ctr < n_recs; ctr++) {
                data[ctr] = recs[ctr].data;
            }
            /*
             * NOTE(review): unlike the input-buffer branch below, this
             * failure path only raises an error and does not return -1 --
             * confirm whether a return was intended here.
             */
            if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_OUTPUT_BUFS,
                                    n_recs, data) <= 0) {
                SSLerr(SSL_F_TLS1_ENC, SSL_R_PIPELINE_FAILURE);
            }
            /* Set the input buffers */
            for (ctr = 0; ctr < n_recs; ctr++) {
                data[ctr] = recs[ctr].input;
            }
            if (EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_BUFS,
                                    n_recs, data) <= 0
                || EVP_CIPHER_CTX_ctrl(ds, EVP_CTRL_SET_PIPELINE_INPUT_LENS,
                                       n_recs, reclen) <= 0) {
                SSLerr(SSL_F_TLS1_ENC, SSL_R_PIPELINE_FAILURE);
                return -1;
            }
        }

        i = EVP_Cipher(ds, recs[0].data, recs[0].input, reclen[0]);
        if ((EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(ds))
             & EVP_CIPH_FLAG_CUSTOM_CIPHER)
            ? (i < 0)
            : (i == 0))
            return -1;          /* AEAD can fail to verify MAC */

        if (send == 0) {
            /* strip the explicit IV/nonce that precedes AEAD payloads */
            if (EVP_CIPHER_mode(enc) == EVP_CIPH_GCM_MODE) {
                for (ctr = 0; ctr < n_recs; ctr++) {
                    recs[ctr].data += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_GCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
                }
            } else if (EVP_CIPHER_mode(enc) == EVP_CIPH_CCM_MODE) {
                for (ctr = 0; ctr < n_recs; ctr++) {
                    recs[ctr].data += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].input += EVP_CCM_TLS_EXPLICIT_IV_LEN;
                    recs[ctr].length -= EVP_CCM_TLS_EXPLICIT_IV_LEN;
                }
            }
        }

        ret = 1;
        if (!SSL_READ_ETM(s) && EVP_MD_CTX_md(s->read_hash) != NULL)
            mac_size = EVP_MD_CTX_size(s->read_hash);
        if ((bs != 1) && !send) {
            int tmpret;
            for (ctr = 0; ctr < n_recs; ctr++) {
                tmpret = tls1_cbc_remove_padding(s, &recs[ctr], bs, mac_size);
                /*
                 * If tmpret == 0 then this means publicly invalid so we can
                 * short circuit things here. Otherwise we must respect
                 * constant time behaviour.
                 */
                if (tmpret == 0)
                    return 0;
                ret = constant_time_select_int(constant_time_eq_int(tmpret, 1),
                                               ret, -1);
            }
        }
        if (pad && !send) {
            for (ctr = 0; ctr < n_recs; ctr++) {
                recs[ctr].length -= pad;
            }
        }
    }
    return ret;
}

/*
 * Compute (or verify input for) the SSLv3 MAC of |rec| into |md|.
 * Uses the hand-rolled SSLv3 MAC construction (secret || pad1/pad2) and,
 * on the read side with a CBC cipher, the constant-time
 * ssl3_cbc_digest_record() path. Also advances the read/write sequence
 * number. Returns the MAC size, or -1 on error.
 */
int n_ssl3_mac(SSL *ssl, SSL3_RECORD *rec, unsigned char *md, int send)
{
    unsigned char *mac_sec, *seq;
    const EVP_MD_CTX *hash;
    unsigned char *p, rec_char;
    size_t md_size;
    int npad;
    int t;

    if (send) {
        mac_sec = &(ssl->s3->write_mac_secret[0]);
        seq = RECORD_LAYER_get_write_sequence(&ssl->rlayer);
        hash = ssl->write_hash;
    } else {
        mac_sec = &(ssl->s3->read_mac_secret[0]);
        seq = RECORD_LAYER_get_read_sequence(&ssl->rlayer);
        hash = ssl->read_hash;
    }

    t = EVP_MD_CTX_size(hash);
    if (t < 0)
        return -1;
    md_size = t;
    npad = (48 / md_size) * md_size;

    if (!send && EVP_CIPHER_CTX_mode(ssl->enc_read_ctx) == EVP_CIPH_CBC_MODE
        && ssl3_cbc_record_digest_supported(hash)) {
        /*
         * This is a CBC-encrypted record. We must avoid leaking any
         * timing-side channel information about how many blocks of data we
         * are hashing because that gives an attacker a timing-oracle.
         */

        /*-
         * npad is, at most, 48 bytes and that's with MD5:
         *   16 + 48 + 8 (sequence bytes) + 1 + 2 = 75.
         *
         * With SHA-1 (the largest hash speced for SSLv3) the hash size
         * goes up 4, but npad goes down by 8, resulting in a smaller
         * total size.
         */
        unsigned char header[75];
        unsigned j = 0;
        memcpy(header + j, mac_sec, md_size);
        j += md_size;
        memcpy(header + j, ssl3_pad_1, npad);
        j += npad;
        memcpy(header + j, seq, 8);
        j += 8;
        header[j++] = rec->type;
        header[j++] = rec->length >> 8;
        header[j++] = rec->length & 0xff;

        /* Final param == is SSLv3 */
        if (ssl3_cbc_digest_record(hash,
                                   md, &md_size,
                                   header, rec->input,
                                   rec->length + md_size, rec->orig_len,
                                   mac_sec, md_size, 1) <= 0)
            return -1;
    } else {
        unsigned int md_size_u;
        /* Chop the digest off the end :-) */
        EVP_MD_CTX *md_ctx = EVP_MD_CTX_new();

        if (md_ctx == NULL)
            return -1;

        rec_char = rec->type;
        p = md;
        s2n(rec->length, p);
        /* MAC = hash(secret || pad2 || hash(secret || pad1 || seq ||
         * type || len || data)) */
        if (EVP_MD_CTX_copy_ex(md_ctx, hash) <= 0
            || EVP_DigestUpdate(md_ctx, mac_sec, md_size) <= 0
            || EVP_DigestUpdate(md_ctx, ssl3_pad_1, npad) <= 0
            || EVP_DigestUpdate(md_ctx, seq, 8) <= 0
            || EVP_DigestUpdate(md_ctx, &rec_char, 1) <= 0
            || EVP_DigestUpdate(md_ctx, md, 2) <= 0
            || EVP_DigestUpdate(md_ctx, rec->input, rec->length) <= 0
            || EVP_DigestFinal_ex(md_ctx, md, NULL) <= 0
            || EVP_MD_CTX_copy_ex(md_ctx, hash) <= 0
            || EVP_DigestUpdate(md_ctx, mac_sec, md_size) <= 0
            || EVP_DigestUpdate(md_ctx, ssl3_pad_2, npad) <= 0
            || EVP_DigestUpdate(md_ctx, md, md_size) <= 0
            || EVP_DigestFinal_ex(md_ctx, md, &md_size_u) <= 0) {
            /*
             * NOTE(review): this error path resets md_ctx but never frees
             * it -- looks like a context leak; confirm whether
             * EVP_MD_CTX_free() was intended.
             */
            EVP_MD_CTX_reset(md_ctx);
            return -1;
        }
        md_size = md_size_u;

        EVP_MD_CTX_free(md_ctx);
    }

    ssl3_record_sequence_update(seq);
    return (md_size);
}

/*
 * Compute the TLS (HMAC-based) MAC of |rec| into |md|.
 * Builds the 13-byte MAC header (sequence/epoch || type || version || len),
 * using ssl3_cbc_digest_record() on the constant-time CBC read path, and
 * advances the implicit sequence number for non-DTLS connections.
 * Returns the MAC size, or -1 on error.
 */
int tls1_mac(SSL *ssl, SSL3_RECORD *rec, unsigned char *md, int send)
{
    unsigned char *seq;
    EVP_MD_CTX *hash;
    size_t md_size;
    int i;
    EVP_MD_CTX *hmac = NULL, *mac_ctx;
    unsigned char header[13];
    int stream_mac = (send ? (ssl->mac_flags & SSL_MAC_FLAG_WRITE_MAC_STREAM)
                      : (ssl->mac_flags & SSL_MAC_FLAG_READ_MAC_STREAM));
    int t;

    if (send) {
        seq = RECORD_LAYER_get_write_sequence(&ssl->rlayer);
        hash = ssl->write_hash;
    } else {
        seq = RECORD_LAYER_get_read_sequence(&ssl->rlayer);
        hash = ssl->read_hash;
    }

    t = EVP_MD_CTX_size(hash);
    OPENSSL_assert(t >= 0);
    md_size = t;

    /* I should fix this up TLS TLS TLS TLS TLS XXXXXXXX */
    if (stream_mac) {
        /* stream MACs update the long-lived context directly */
        mac_ctx = hash;
    } else {
        hmac = EVP_MD_CTX_new();
        if (hmac == NULL || !EVP_MD_CTX_copy(hmac, hash))
            return -1;
        mac_ctx = hmac;
    }

    if (SSL_IS_DTLS(ssl)) {
        /* DTLS uses explicit epoch || sequence from the record layer */
        unsigned char dtlsseq[8], *p = dtlsseq;

        s2n(send ? DTLS_RECORD_LAYER_get_w_epoch(&ssl->rlayer) :
            DTLS_RECORD_LAYER_get_r_epoch(&ssl->rlayer), p);
        memcpy(p, &seq[2], 6);

        memcpy(header, dtlsseq, 8);
    } else
        memcpy(header, seq, 8);

    header[8] = rec->type;
    header[9] = (unsigned char)(ssl->version >> 8);
    header[10] = (unsigned char)(ssl->version);
    header[11] = (rec->length) >> 8;
    header[12] = (rec->length) & 0xff;

    if (!send && !SSL_READ_ETM(ssl) &&
        EVP_CIPHER_CTX_mode(ssl->enc_read_ctx) == EVP_CIPH_CBC_MODE &&
        ssl3_cbc_record_digest_supported(mac_ctx)) {
        /*
         * This is a CBC-encrypted record. We must avoid leaking any
         * timing-side channel information about how many blocks of data we
         * are hashing because that gives an attacker a timing-oracle.
         */
        /* Final param == not SSLv3 */
        if (ssl3_cbc_digest_record(mac_ctx,
                                   md, &md_size,
                                   header, rec->input,
                                   rec->length + md_size, rec->orig_len,
                                   ssl->s3->read_mac_secret,
                                   ssl->s3->read_mac_secret_size, 0) <= 0) {
            EVP_MD_CTX_free(hmac);
            return -1;
        }
    } else {
        if (EVP_DigestSignUpdate(mac_ctx, header, sizeof(header)) <= 0
            || EVP_DigestSignUpdate(mac_ctx, rec->input, rec->length) <= 0
            || EVP_DigestSignFinal(mac_ctx, md, &md_size) <= 0) {
            EVP_MD_CTX_free(hmac);
            return -1;
        }
        if (!send && !SSL_READ_ETM(ssl) && FIPS_mode())
            if (!tls_fips_digest_extra(ssl->enc_read_ctx,
                                       mac_ctx, rec->input,
                                       rec->length, rec->orig_len)) {
                EVP_MD_CTX_free(hmac);
                return -1;
            }
    }

    EVP_MD_CTX_free(hmac);

#ifdef SSL_DEBUG
    fprintf(stderr, "seq=");
    {
        int z;
        for (z = 0; z < 8; z++)
            fprintf(stderr, "%02X ", seq[z]);
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "rec=");
    {
        unsigned int z;
        for (z = 0; z < rec->length; z++)
            fprintf(stderr, "%02X ", rec->data[z]);
        fprintf(stderr, "\n");
    }
#endif

    if (!SSL_IS_DTLS(ssl)) {
        /* bump the 64-bit implicit sequence number (big-endian) */
        for (i = 7; i >= 0; i--) {
            ++seq[i];
            if (seq[i] != 0)
                break;
        }
    }
#ifdef SSL_DEBUG
    {
        unsigned int z;
        for (z = 0; z < md_size; z++)
            fprintf(stderr, "%02X ", md[z]);
        fprintf(stderr, "\n");
    }
#endif
    return (md_size);
}

/*-
 * ssl3_cbc_remove_padding removes padding from the decrypted, SSLv3, CBC
 * record in |rec| by updating |rec->length| in constant time.
 *
 * block_size: the block size of the cipher used to encrypt the record.
 * returns:
 *   0: (in non-constant time) if the record is publicly invalid.
 *   1: if the padding was valid
 *  -1: otherwise.
 */
int ssl3_cbc_remove_padding(SSL3_RECORD *rec,
                            unsigned block_size, unsigned mac_size)
{
    unsigned padding_length, good;
    const unsigned overhead = 1 /* padding length byte */ + mac_size;

    /*
     * These lengths are all public so we can test them in non-constant
     * time.
     */
    if (overhead > rec->length)
        return 0;

    padding_length = rec->data[rec->length - 1];
    good = constant_time_ge(rec->length, padding_length + overhead);
    /* SSLv3 requires that the padding is minimal.
 */
    good &= constant_time_ge(block_size, padding_length + 1);
    rec->length -= good & (padding_length + 1);
    return constant_time_select_int(good, 1, -1);
}

/*-
 * tls1_cbc_remove_padding removes the CBC padding from the decrypted, TLS, CBC
 * record in |rec| in constant time and returns 1 if the padding is valid and
 * -1 otherwise. It also removes any explicit IV from the start of the record
 * without leaking any timing about whether there was enough space after the
 * padding was removed.
 *
 * block_size: the block size of the cipher used to encrypt the record.
 * returns:
 *   0: (in non-constant time) if the record is publicly invalid.
 *   1: if the padding was valid
 *  -1: otherwise.
 */
int tls1_cbc_remove_padding(const SSL *s,
                            SSL3_RECORD *rec,
                            unsigned block_size, unsigned mac_size)
{
    unsigned padding_length, good, to_check, i;
    const unsigned overhead = 1 /* padding length byte */ + mac_size;

    /* Check if version requires explicit IV */
    if (SSL_USE_EXPLICIT_IV(s)) {
        /*
         * These lengths are all public so we can test them in non-constant
         * time.
         */
        if (overhead + block_size > rec->length)
            return 0;
        /* We can now safely skip explicit IV */
        rec->data += block_size;
        rec->input += block_size;
        rec->length -= block_size;
        rec->orig_len -= block_size;
    } else if (overhead > rec->length)
        return 0;

    padding_length = rec->data[rec->length - 1];

    if (EVP_CIPHER_flags(EVP_CIPHER_CTX_cipher(s->enc_read_ctx)) &
        EVP_CIPH_FLAG_AEAD_CIPHER) {
        /* padding is already verified */
        rec->length -= padding_length + 1;
        return 1;
    }

    good = constant_time_ge(rec->length, overhead + padding_length);
    /*
     * The padding consists of a length byte at the end of the record and
     * then that many bytes of padding, all with the same value as the length
     * byte. Thus, with the length byte included, there are i+1 bytes of
     * padding. We can't check just |padding_length+1| bytes because that
     * leaks decrypted information. Therefore we always have to check the
     * maximum amount of padding possible. (Again, the length of the record
     * is public information so we can use it.)
     */
    to_check = 256;             /* maximum amount of padding, inc length byte. */
    if (to_check > rec->length)
        to_check = rec->length;

    for (i = 0; i < to_check; i++) {
        unsigned char mask = constant_time_ge_8(padding_length, i);
        unsigned char b = rec->data[rec->length - 1 - i];
        /*
         * The final |padding_length+1| bytes should all have the value
         * |padding_length|. Therefore the XOR should be zero.
         */
        good &= ~(mask & (padding_length ^ b));
    }

    /*
     * If any of the final |padding_length+1| bytes had the wrong value, one
     * or more of the lower eight bits of |good| will be cleared.
     */
    good = constant_time_eq(0xff, good & 0xff);
    rec->length -= good & (padding_length + 1);

    return constant_time_select_int(good, 1, -1);
}

/*-
 * ssl3_cbc_copy_mac copies |md_size| bytes from the end of |rec| to |out| in
 * constant time (independent of the concrete value of rec->length, which may
 * vary within a 256-byte window).
 *
 * ssl3_cbc_remove_padding or tls1_cbc_remove_padding must be called prior to
 * this function.
 *
 * On entry:
 *   rec->orig_len >= md_size
 *   md_size <= EVP_MAX_MD_SIZE
 *
 * If CBC_MAC_ROTATE_IN_PLACE is defined then the rotation is performed with
 * variable accesses in a 64-byte-aligned buffer. Assuming that this fits into
 * a single or pair of cache-lines, then the variable memory accesses don't
 * actually affect the timing. CPUs with smaller cache-lines [if any] are
 * not multi-core and are not considered vulnerable to cache-timing attacks.
 */
#define CBC_MAC_ROTATE_IN_PLACE

void ssl3_cbc_copy_mac(unsigned char *out,
                       const SSL3_RECORD *rec, unsigned md_size)
{
#if defined(CBC_MAC_ROTATE_IN_PLACE)
    unsigned char rotated_mac_buf[64 + EVP_MAX_MD_SIZE];
    unsigned char *rotated_mac;
#else
    unsigned char rotated_mac[EVP_MAX_MD_SIZE];
#endif

    /*
     * mac_end is the index of |rec->data| just after the end of the MAC.
*/ unsigned mac_end = rec->length; unsigned mac_start = mac_end - md_size; unsigned in_mac; /* * scan_start contains the number of bytes that we can ignore because the * MAC's position can only vary by 255 bytes. */ unsigned scan_start = 0; unsigned i, j; unsigned rotate_offset; OPENSSL_assert(rec->orig_len >= md_size); OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE); #if defined(CBC_MAC_ROTATE_IN_PLACE) rotated_mac = rotated_mac_buf + ((0 - (size_t)rotated_mac_buf) & 63); #endif /* This information is public so it's safe to branch based on it. */ if (rec->orig_len > md_size + 255 + 1) scan_start = rec->orig_len - (md_size + 255 + 1); in_mac = 0; rotate_offset = 0; memset(rotated_mac, 0, md_size); for (i = scan_start, j = 0; i < rec->orig_len; i++) { unsigned mac_started = constant_time_eq(i, mac_start); unsigned mac_ended = constant_time_lt(i, mac_end); unsigned char b = rec->data[i]; in_mac |= mac_started; in_mac &= mac_ended; rotate_offset |= j & mac_started; rotated_mac[j++] |= b & in_mac; j &= constant_time_lt(j, md_size); } /* Now rotate the MAC */ #if defined(CBC_MAC_ROTATE_IN_PLACE) j = 0; for (i = 0; i < md_size; i++) { /* in case cache-line is 32 bytes, touch second line */ ((volatile unsigned char *)rotated_mac)[rotate_offset ^ 32]; out[j++] = rotated_mac[rotate_offset++]; rotate_offset &= constant_time_lt(rotate_offset, md_size); } #else memset(out, 0, md_size); rotate_offset = md_size - rotate_offset; rotate_offset &= constant_time_lt(rotate_offset, md_size); for (i = 0; i < md_size; i++) { for (j = 0; j < md_size; j++) out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset); rotate_offset++; rotate_offset &= constant_time_lt(rotate_offset, md_size); } #endif } int dtls1_process_record(SSL *s, DTLS1_BITMAP *bitmap) { int i, al; int enc_err; SSL_SESSION *sess; SSL3_RECORD *rr; unsigned int mac_size; unsigned char md[EVP_MAX_MD_SIZE]; rr = RECORD_LAYER_get_rrec(&s->rlayer); sess = s->session; /* * At this point, s->packet_length == 
SSL3_RT_HEADER_LNGTH + rr->length, * and we have that many bytes in s->packet */ rr->input = &(RECORD_LAYER_get_packet(&s->rlayer)[DTLS1_RT_HEADER_LENGTH]); /* * ok, we can now read from 's->packet' data into 'rr' rr->input points * at rr->length bytes, which need to be copied into rr->data by either * the decryption or by the decompression When the data is 'copied' into * the rr->data buffer, rr->input will be pointed at the new buffer */ /* * We now have - encrypted [ MAC [ compressed [ plain ] ] ] rr->length * bytes of encrypted compressed stuff. */ /* check is not needed I believe */ if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD, SSL_R_ENCRYPTED_LENGTH_TOO_LONG); goto f_err; } /* decrypt in place in 'rr->input' */ rr->data = rr->input; rr->orig_len = rr->length; enc_err = s->method->ssl3_enc->enc(s, rr, 1, 0); /*- * enc_err is: * 0: (in non-constant time) if the record is publically invalid. * 1: if the padding is valid * -1: if the padding is invalid */ if (enc_err == 0) { /* For DTLS we simply ignore bad packets. */ rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto err; } #ifdef SSL_DEBUG printf("dec %d\n", rr->length); { unsigned int z; for (z = 0; z < rr->length; z++) printf("%02X%c", rr->data[z], ((z + 1) % 16) ? ' ' : '\n'); } printf("\n"); #endif /* r->length is now the compressed data plus mac */ if ((sess != NULL) && (s->enc_read_ctx != NULL) && (EVP_MD_CTX_md(s->read_hash) != NULL)) { /* s->read_hash != NULL => mac_size != -1 */ unsigned char *mac = NULL; unsigned char mac_tmp[EVP_MAX_MD_SIZE]; mac_size = EVP_MD_CTX_size(s->read_hash); OPENSSL_assert(mac_size <= EVP_MAX_MD_SIZE); /* * orig_len is the length of the record before any padding was * removed. This is public information, as is the MAC in use, * therefore we can safely process the record in a different amount * of time if it's too short to possibly contain a MAC. 
*/ if (rr->orig_len < mac_size || /* CBC records must have a padding length byte too. */ (EVP_CIPHER_CTX_mode(s->enc_read_ctx) == EVP_CIPH_CBC_MODE && rr->orig_len < mac_size + 1)) { al = SSL_AD_DECODE_ERROR; SSLerr(SSL_F_DTLS1_PROCESS_RECORD, SSL_R_LENGTH_TOO_SHORT); goto f_err; } if (EVP_CIPHER_CTX_mode(s->enc_read_ctx) == EVP_CIPH_CBC_MODE) { /* * We update the length so that the TLS header bytes can be * constructed correctly but we need to extract the MAC in * constant time from within the record, without leaking the * contents of the padding bytes. */ mac = mac_tmp; ssl3_cbc_copy_mac(mac_tmp, rr, mac_size); rr->length -= mac_size; } else { /* * In this case there's no padding, so |rec->orig_len| equals * |rec->length| and we checked that there's enough bytes for * |mac_size| above. */ rr->length -= mac_size; mac = &rr->data[rr->length]; } i = s->method->ssl3_enc->mac(s, rr, md, 0 /* not send */ ); if (i < 0 || mac == NULL || CRYPTO_memcmp(md, mac, (size_t)mac_size) != 0) enc_err = -1; if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_size) enc_err = -1; } if (enc_err < 0) { /* decryption failed, silently discard message */ rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto err; } /* r->length is now just compressed */ if (s->expand != NULL) { if (rr->length > SSL3_RT_MAX_COMPRESSED_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD, SSL_R_COMPRESSED_LENGTH_TOO_LONG); goto f_err; } if (!ssl3_do_uncompress(s, rr)) { al = SSL_AD_DECOMPRESSION_FAILURE; SSLerr(SSL_F_DTLS1_PROCESS_RECORD, SSL_R_BAD_DECOMPRESSION); goto f_err; } } if (rr->length > SSL3_RT_MAX_PLAIN_LENGTH) { al = SSL_AD_RECORD_OVERFLOW; SSLerr(SSL_F_DTLS1_PROCESS_RECORD, SSL_R_DATA_LENGTH_TOO_LONG); goto f_err; } rr->off = 0; /*- * So at this point the following is true * ssl->s3->rrec.type is the type of record * ssl->s3->rrec.length == number of bytes in record * ssl->s3->rrec.off == offset to first valid byte * ssl->s3->rrec.data == where to take bytes 
from, increment * after use :-). */ /* we have pulled in a full packet so zero things */ RECORD_LAYER_reset_packet_length(&s->rlayer); /* Mark receipt of record. */ dtls1_record_bitmap_update(s, bitmap); return (1); f_err: ssl3_send_alert(s, SSL3_AL_FATAL, al); err: return (0); } /* * retrieve a buffered record that belongs to the current epoch, ie, * processed */ #define dtls1_get_processed_record(s) \ dtls1_retrieve_buffered_record((s), \ &(DTLS_RECORD_LAYER_get_processed_rcds(&s->rlayer))) /*- * Call this to get a new input record. * It will return <= 0 if more data is needed, normally due to an error * or non-blocking IO. * When it finishes, one packet has been decoded and can be found in * ssl->s3->rrec.type - is the type of record * ssl->s3->rrec.data, - data * ssl->s3->rrec.length, - number of bytes */ /* used only by dtls1_read_bytes */ int dtls1_get_record(SSL *s) { int ssl_major, ssl_minor; int i, n; SSL3_RECORD *rr; unsigned char *p = NULL; unsigned short version; DTLS1_BITMAP *bitmap; unsigned int is_next_epoch; rr = RECORD_LAYER_get_rrec(&s->rlayer); again: /* * The epoch may have changed. If so, process all the pending records. * This is a non-blocking operation. 
*/ if (!dtls1_process_buffered_records(s)) return -1; /* if we're renegotiating, then there may be buffered records */ if (dtls1_get_processed_record(s)) return 1; /* get something from the wire */ /* check if we have the header */ if ((RECORD_LAYER_get_rstate(&s->rlayer) != SSL_ST_READ_BODY) || (RECORD_LAYER_get_packet_length(&s->rlayer) < DTLS1_RT_HEADER_LENGTH)) { n = ssl3_read_n(s, DTLS1_RT_HEADER_LENGTH, SSL3_BUFFER_get_len(&s->rlayer.rbuf), 0, 1); /* read timeout is handled by dtls1_read_bytes */ if (n <= 0) return (n); /* error or non-blocking */ /* this packet contained a partial record, dump it */ if (RECORD_LAYER_get_packet_length(&s->rlayer) != DTLS1_RT_HEADER_LENGTH) { RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_BODY); p = RECORD_LAYER_get_packet(&s->rlayer); if (s->msg_callback) s->msg_callback(0, 0, SSL3_RT_HEADER, p, DTLS1_RT_HEADER_LENGTH, s, s->msg_callback_arg); /* Pull apart the header into the DTLS1_RECORD */ rr->type = *(p++); ssl_major = *(p++); ssl_minor = *(p++); version = (ssl_major << 8) | ssl_minor; /* sequence number is 64 bits, with top 2 bytes = epoch */ n2s(p, rr->epoch); memcpy(&(RECORD_LAYER_get_read_sequence(&s->rlayer)[2]), p, 6); p += 6; n2s(p, rr->length); /* Lets check version */ if (!s->first_packet) { if (version != s->version) { /* unexpected version, silently discard */ rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } } if ((version & 0xff00) != (s->version & 0xff00)) { /* wrong version, silently discard record */ rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } if (rr->length > SSL3_RT_MAX_ENCRYPTED_LENGTH) { /* record too long, silently discard it */ rr->length = 0; RECORD_LAYER_reset_packet_length(&s->rlayer); goto again; } /* now s->rlayer.rstate == SSL_ST_READ_BODY */ } /* s->rlayer.rstate == SSL_ST_READ_BODY, get and decode the data */ if (rr->length > RECORD_LAYER_get_packet_length(&s->rlayer) - 
        DTLS1_RT_HEADER_LENGTH) {
        /* now s->packet_length == DTLS1_RT_HEADER_LENGTH */
        i = rr->length;
        /* Read exactly the body length announced in the header. */
        n = ssl3_read_n(s, i, i, 1, 1);
        /* this packet contained a partial record, dump it */
        if (n != i) {
            /* DTLS is datagram-based: a short record is silently dropped. */
            rr->length = 0;
            RECORD_LAYER_reset_packet_length(&s->rlayer);
            goto again;
        }
        /*
         * now n == rr->length, and s->packet_length ==
         * DTLS1_RT_HEADER_LENGTH + rr->length
         */
    }
    /* set state for later operations */
    RECORD_LAYER_set_rstate(&s->rlayer, SSL_ST_READ_HEADER);

    /* match epochs. NULL means the packet is dropped on the floor */
    bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch);
    if (bitmap == NULL) {
        rr->length = 0;
        RECORD_LAYER_reset_packet_length(&s->rlayer); /* dump this record */
        goto again;             /* get another record */
    }
#ifndef OPENSSL_NO_SCTP
    /* Only do replay check if no SCTP bio */
    if (!BIO_dgram_is_sctp(SSL_get_rbio(s))) {
#endif
        /* Check whether this is a repeat, or aged record. */
        /*
         * TODO: Does it make sense to have replay protection in epoch 0 where
         * we have no integrity negotiated yet?
         */
        if (!dtls1_record_replay_check(s, bitmap)) {
            rr->length = 0;
            RECORD_LAYER_reset_packet_length(&s->rlayer); /* dump this record */
            goto again;         /* get another record */
        }
#ifndef OPENSSL_NO_SCTP
    }
#endif

    /* just read a 0 length packet */
    if (rr->length == 0)
        goto again;

    /*
     * If this record is from the next epoch (either HM or ALERT), and a
     * handshake is currently in progress, buffer it since it cannot be
     * processed at this time.
     */
    if (is_next_epoch) {
        if ((SSL_in_init(s) || ossl_statem_get_in_handshake(s))) {
            /* dtls1_buffer_record < 0 is a hard (allocation-style) failure. */
            if (dtls1_buffer_record
                (s, &(DTLS_RECORD_LAYER_get_unprocessed_rcds(&s->rlayer)),
                 rr->seq_num) < 0)
                return -1;
        }
        rr->length = 0;
        RECORD_LAYER_reset_packet_length(&s->rlayer);
        goto again;
    }

    if (!dtls1_process_record(s, bitmap)) {
        /* Decryption/MAC failure: silently discard, per DTLS semantics. */
        rr->length = 0;
        RECORD_LAYER_reset_packet_length(&s->rlayer); /* dump this record */
        goto again;             /* get another record */
    }

    return (1);
}
/*
 * Dataset artifact markers (not C code; commented out so the file parses):
 * ./CrossVul/dataset_final_sorted/CWE-20/c/good_3070_2
 * crossvul-cpp_data_bad_5876_0
 * Everything below this point is from a different source file (mm/filemap.c).
 */
/* * linux/mm/filemap.c * * Copyright (C) 1994-1999 Linus Torvalds */ /* * This file handles the generic file mmap semantics used by * most "normal" filesystems (but you don't /have/ to use this: * the NFS filesystem used to do this differently, for example) */ #include <linux/module.h> #include <linux/slab.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/aio.h> #include <linux/capability.h> #include <linux/kernel_stat.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/uio.h> #include <linux/hash.h> #include <linux/writeback.h> #include <linux/backing-dev.h> #include <linux/pagevec.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/cpuset.h> #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */ #include "internal.h" /* * FIXME: remove all knowledge of the buffer layer from the core VM */ #include <linux/buffer_head.h> /* for generic_osync_inode */ #include <asm/mman.h> static ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs); /* * Shared mappings implemented 30.11.1994. It's not fully working yet, * though. * * Shared mappings now work. 15.8.1995 Bruno. 
* * finished 'unifying' the page and buffer cache and SMP-threaded the * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com> * * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de> */ /* * Lock ordering: * * ->i_mmap_lock (vmtruncate) * ->private_lock (__free_pte->__set_page_dirty_buffers) * ->swap_lock (exclusive_swap_page, others) * ->mapping->tree_lock * ->zone.lock * * ->i_mutex * ->i_mmap_lock (truncate->unmap_mapping_range) * * ->mmap_sem * ->i_mmap_lock * ->page_table_lock or pte_lock (various, mainly in memory.c) * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock) * * ->mmap_sem * ->lock_page (access_process_vm) * * ->i_mutex (generic_file_buffered_write) * ->mmap_sem (fault_in_pages_readable->do_page_fault) * * ->i_mutex * ->i_alloc_sem (various) * * ->inode_lock * ->sb_lock (fs/fs-writeback.c) * ->mapping->tree_lock (__sync_single_inode) * * ->i_mmap_lock * ->anon_vma.lock (vma_adjust) * * ->anon_vma.lock * ->page_table_lock or pte_lock (anon_vma_prepare and various) * * ->page_table_lock or pte_lock * ->swap_lock (try_to_unmap_one) * ->private_lock (try_to_unmap_one) * ->tree_lock (try_to_unmap_one) * ->zone.lru_lock (follow_page->mark_page_accessed) * ->zone.lru_lock (check_pte_range->isolate_lru_page) * ->private_lock (page_remove_rmap->set_page_dirty) * ->tree_lock (page_remove_rmap->set_page_dirty) * ->inode_lock (page_remove_rmap->set_page_dirty) * ->inode_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->__set_page_dirty_buffers) * * ->task->proc_lock * ->dcache_lock (proc_pid_lookup) */ /* * Remove a page from the page cache and free it. Caller has to make * sure the page is locked and that nobody else uses it - or that usage * is safe. The caller must hold a write_lock on the mapping's tree_lock. 
*/ void __remove_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; radix_tree_delete(&mapping->page_tree, page->index); page->mapping = NULL; mapping->nrpages--; __dec_zone_page_state(page, NR_FILE_PAGES); BUG_ON(page_mapped(page)); /* * Some filesystems seem to re-dirty the page even after * the VM has canceled the dirty bit (eg ext3 journaling). * * Fix it up by doing a final dirty accounting check after * having removed the page entirely. */ if (PageDirty(page) && mapping_cap_account_dirty(mapping)) { dec_zone_page_state(page, NR_FILE_DIRTY); dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); } } void remove_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; BUG_ON(!PageLocked(page)); write_lock_irq(&mapping->tree_lock); __remove_from_page_cache(page); write_unlock_irq(&mapping->tree_lock); } static int sync_page(void *word) { struct address_space *mapping; struct page *page; page = container_of((unsigned long *)word, struct page, flags); /* * page_mapping() is being called without PG_locked held. * Some knowledge of the state and use of the page is used to * reduce the requirements down to a memory barrier. * The danger here is of a stale page_mapping() return value * indicating a struct address_space different from the one it's * associated with when it is associated with one. * After smp_mb(), it's either the correct page_mapping() for * the page, or an old page_mapping() and the page's own * page_mapping() has gone NULL. * The ->sync_page() address_space operation must tolerate * page_mapping() going NULL. By an amazing coincidence, * this comes about because none of the users of the page * in the ->sync_page() methods make essential use of the * page_mapping(), merely passing the page down to the backing * device's unplug functions when it's non-NULL, which in turn * ignore it for all cases but swap, where only page_private(page) is * of interest. 
When page_mapping() does go NULL, the entire * call stack gracefully ignores the page and returns. * -- wli */ smp_mb(); mapping = page_mapping(page); if (mapping && mapping->a_ops && mapping->a_ops->sync_page) mapping->a_ops->sync_page(page); io_schedule(); return 0; } static int sync_page_killable(void *word) { sync_page(word); return fatal_signal_pending(current) ? -EINTR : 0; } /** * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range * @mapping: address space structure to write * @start: offset in bytes where the range starts * @end: offset in bytes where the range ends (inclusive) * @sync_mode: enable synchronous operation * * Start writeback against all of a mapping's dirty pages that lie * within the byte offsets <start, end> inclusive. * * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as * opposed to a regular memory cleansing writeback. The difference between * these two operations is that if a dirty page/buffer is encountered, it must * be waited upon, and not just skipped over. 
*/ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end, int sync_mode) { int ret; struct writeback_control wbc = { .sync_mode = sync_mode, .nr_to_write = mapping->nrpages * 2, .range_start = start, .range_end = end, }; if (!mapping_cap_writeback_dirty(mapping)) return 0; ret = do_writepages(mapping, &wbc); return ret; } static inline int __filemap_fdatawrite(struct address_space *mapping, int sync_mode) { return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); } int filemap_fdatawrite(struct address_space *mapping) { return __filemap_fdatawrite(mapping, WB_SYNC_ALL); } EXPORT_SYMBOL(filemap_fdatawrite); static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end) { return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); } /** * filemap_flush - mostly a non-blocking flush * @mapping: target address_space * * This is a mostly non-blocking flush. Not suitable for data-integrity * purposes - I/O may not be started against all dirty pages. 
 */
int filemap_flush(struct address_space *mapping)
{
	/* WB_SYNC_NONE: start writeback but do not wait on any page. */
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * wait_on_page_writeback_range - wait for writeback to complete
 * @mapping:	target address_space
 * @start:	beginning page index
 * @end:	ending page index
 *
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive.  Returns 0, or -EIO/-ENOSPC if a write error was recorded
 * either on an individual page or on the mapping's error flags.
 */
int wait_on_page_writeback_range(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	/*
	 * Walk the PAGECACHE_TAG_WRITEBACK-tagged pages in batches of up to
	 * PAGEVEC_SIZE; pagevec_lookup_tag() advances |index| for us.
	 */
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();	/* batch boundary: allow preemption */
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}

/**
 * sync_page_range - write and wait on all pages in the passed range
 * @inode:	target inode
 * @mapping:	target address_space
 * @pos:	beginning offset in pages to write
 * @count:	number of bytes to write
 *
 * Write and wait upon all the pages in the passed range. This is a "data
 * integrity" operation. It waits upon in-flight writeout before starting and
 * waiting upon new writeout. If there was an IO error, return it.
 *
 * We need to re-take i_mutex during the generic_osync_inode list walk because
 * it is otherwise livelockable.
*/ int sync_page_range(struct inode *inode, struct address_space *mapping, loff_t pos, loff_t count) { pgoff_t start = pos >> PAGE_CACHE_SHIFT; pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; int ret; if (!mapping_cap_writeback_dirty(mapping) || !count) return 0; ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1); if (ret == 0) { mutex_lock(&inode->i_mutex); ret = generic_osync_inode(inode, mapping, OSYNC_METADATA); mutex_unlock(&inode->i_mutex); } if (ret == 0) ret = wait_on_page_writeback_range(mapping, start, end); return ret; } EXPORT_SYMBOL(sync_page_range); /** * sync_page_range_nolock * @inode: target inode * @mapping: target address_space * @pos: beginning offset in pages to write * @count: number of bytes to write * * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea * as it forces O_SYNC writers to different parts of the same file * to be serialised right until io completion. */ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, loff_t pos, loff_t count) { pgoff_t start = pos >> PAGE_CACHE_SHIFT; pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT; int ret; if (!mapping_cap_writeback_dirty(mapping) || !count) return 0; ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1); if (ret == 0) ret = generic_osync_inode(inode, mapping, OSYNC_METADATA); if (ret == 0) ret = wait_on_page_writeback_range(mapping, start, end); return ret; } EXPORT_SYMBOL(sync_page_range_nolock); /** * filemap_fdatawait - wait for all under-writeback pages to complete * @mapping: address space structure to wait for * * Walk the list of under-writeback pages of the given address space * and wait for all of them. 
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	/* An empty file cannot have writeback in flight. */
	if (i_size == 0)
		return 0;

	return wait_on_page_writeback_range(mapping, 0,
				(i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);

/**
 * filemap_write_and_wait - write out all dirty pages and wait on them
 * @mapping:	target address_space
 *
 * Starts WB_SYNC_ALL writeback on the whole mapping and then waits for it,
 * returning the first error encountered (write error wins over wait error).
 */
int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that `lend' is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = wait_on_page_writeback_range(mapping,
						lstart >> PAGE_CACHE_SHIFT,
						lend >> PAGE_CACHE_SHIFT);
			if (!err)
				err = err2;
		}
	}
	return err;
}

/**
 * add_to_page_cache - add newly allocated pagecache pages
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add newly allocated pagecache pages;
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
* * This function does not add the page to the LRU. The caller must do that. */ int add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); if (error == 0) { write_lock_irq(&mapping->tree_lock); error = radix_tree_insert(&mapping->page_tree, offset, page); if (!error) { page_cache_get(page); SetPageLocked(page); page->mapping = mapping; page->index = offset; mapping->nrpages++; __inc_zone_page_state(page, NR_FILE_PAGES); } write_unlock_irq(&mapping->tree_lock); radix_tree_preload_end(); } return error; } EXPORT_SYMBOL(add_to_page_cache); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) { int ret = add_to_page_cache(page, mapping, offset, gfp_mask); if (ret == 0) lru_cache_add(page); return ret; } #ifdef CONFIG_NUMA struct page *__page_cache_alloc(gfp_t gfp) { if (cpuset_do_page_mem_spread()) { int n = cpuset_mem_spread_node(); return alloc_pages_node(n, gfp, 0); } return alloc_pages(gfp, 0); } EXPORT_SYMBOL(__page_cache_alloc); #endif static int __sleep_on_page_lock(void *word) { io_schedule(); return 0; } /* * In order to wait for pages to become available there must be * waitqueues associated with pages. By using a hash table of * waitqueues where the bucket discipline is to maintain all * waiters on the same queue and wake all when any of the pages * become available, and for the woken contexts to check to be * sure the appropriate page became available, this saves space * at a cost of "thundering herd" phenomena during rare hash * collisions. 
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	/* Hash the page pointer into the zone's shared wait-table bucket. */
	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	/* Only sleep if the bit is actually set; sync_page unplugs I/O. */
	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechananism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	/* Unlocking an unlocked page is a caller bug — hence BUG(). */
	if (!TestClearPageLocked(page))
		BUG();
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 *
 * Clears PG_writeback (possibly first giving a PG_reclaim page another
 * trip round the LRU via rotate_reclaimable_page()) and wakes waiters.
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	/* Barrier orders the clear against the waitqueue read in waiters. */
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @page: the page to lock
 *
 * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait.
However * chances are that on the second loop, the block layer's plug list is empty, * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. */ void fastcall __lock_page(struct page *page) { DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL(__lock_page); int fastcall __lock_page_killable(struct page *page) { DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); return __wait_on_bit_lock(page_waitqueue(page), &wait, sync_page_killable, TASK_KILLABLE); } /* * Variant of lock_page that does not require the caller to hold a reference * on the page's mapping. */ void fastcall __lock_page_nosync(struct page *page) { DEFINE_WAIT_BIT(wait, &page->flags, PG_locked); __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock, TASK_UNINTERRUPTIBLE); } /** * find_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index * * Is there a pagecache struct page at the given (mapping, offset) tuple? * If yes, increment its refcount and return it; if no, return NULL. */ struct page * find_get_page(struct address_space *mapping, pgoff_t offset) { struct page *page; read_lock_irq(&mapping->tree_lock); page = radix_tree_lookup(&mapping->page_tree, offset); if (page) page_cache_get(page); read_unlock_irq(&mapping->tree_lock); return page; } EXPORT_SYMBOL(find_get_page); /** * find_lock_page - locate, pin and lock a pagecache page * @mapping: the address_space to search * @offset: the page index * * Locates the desired pagecache page, locks it, increments its reference * count and returns its address. * * Returns zero if the page was not present. find_lock_page() may sleep. 
*/ struct page *find_lock_page(struct address_space *mapping, pgoff_t offset) { struct page *page; repeat: read_lock_irq(&mapping->tree_lock); page = radix_tree_lookup(&mapping->page_tree, offset); if (page) { page_cache_get(page); if (TestSetPageLocked(page)) { read_unlock_irq(&mapping->tree_lock); __lock_page(page); /* Has the page been truncated while we slept? */ if (unlikely(page->mapping != mapping)) { unlock_page(page); page_cache_release(page); goto repeat; } VM_BUG_ON(page->index != offset); goto out; } } read_unlock_irq(&mapping->tree_lock); out: return page; } EXPORT_SYMBOL(find_lock_page); /** * find_or_create_page - locate or add a pagecache page * @mapping: the page's address_space * @index: the page's index into the mapping * @gfp_mask: page allocation mode * * Locates a page in the pagecache. If the page is not present, a new page * is allocated using @gfp_mask and is added to the pagecache and to the VM's * LRU list. The returned page is locked and has its reference count * incremented. * * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic * allocation! * * find_or_create_page() returns the desired page's address, or zero on * memory exhaustion. */ struct page *find_or_create_page(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask) { struct page *page; int err; repeat: page = find_lock_page(mapping, index); if (!page) { page = __page_cache_alloc(gfp_mask); if (!page) return NULL; err = add_to_page_cache_lru(page, mapping, index, gfp_mask); if (unlikely(err)) { page_cache_release(page); page = NULL; if (err == -EEXIST) goto repeat; } } return page; } EXPORT_SYMBOL(find_or_create_page); /** * find_get_pages - gang pagecache lookup * @mapping: The address_space to search * @start: The starting page index * @nr_pages: The maximum number of pages * @pages: Where the resulting pages are placed * * find_get_pages() will search for and return a group of up to * @nr_pages pages in the mapping. The pages are placed at @pages. 
* find_get_pages() takes a reference against the returned pages. * * The search returns a group of mapping-contiguous pages with ascending * indexes. There may be holes in the indices due to not-present pages. * * find_get_pages() returns the number of pages which were found. */ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages) { unsigned int i; unsigned int ret; read_lock_irq(&mapping->tree_lock); ret = radix_tree_gang_lookup(&mapping->page_tree, (void **)pages, start, nr_pages); for (i = 0; i < ret; i++) page_cache_get(pages[i]); read_unlock_irq(&mapping->tree_lock); return ret; } /** * find_get_pages_contig - gang contiguous pagecache lookup * @mapping: The address_space to search * @index: The starting page index * @nr_pages: The maximum number of pages * @pages: Where the resulting pages are placed * * find_get_pages_contig() works exactly like find_get_pages(), except * that the returned number of pages are guaranteed to be contiguous. * * find_get_pages_contig() returns the number of pages which were found. */ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, unsigned int nr_pages, struct page **pages) { unsigned int i; unsigned int ret; read_lock_irq(&mapping->tree_lock); ret = radix_tree_gang_lookup(&mapping->page_tree, (void **)pages, index, nr_pages); for (i = 0; i < ret; i++) { if (pages[i]->mapping == NULL || pages[i]->index != index) break; page_cache_get(pages[i]); index++; } read_unlock_irq(&mapping->tree_lock); return i; } EXPORT_SYMBOL(find_get_pages_contig); /** * find_get_pages_tag - find and return pages that match @tag * @mapping: the address_space to search * @index: the starting page index * @tag: the tag index * @nr_pages: the maximum number of pages * @pages: where the resulting pages are placed * * Like find_get_pages, except we only return pages which are tagged with * @tag. We update @index to index the next page for the traversal. 
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup_tag(&mapping->page_tree, (void **)pages,
				*index, nr_pages, tag);
	/* Pin every page found before dropping the tree lock. */
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	if (ret)
		/* Advance the caller's cursor past the last page returned. */
		*index = pages[ret - 1]->index + 1;
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}
EXPORT_SYMBOL(find_get_pages_tag);

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
{
	struct page *page = find_get_page(mapping, index);

	if (page) {
		/* Trylock only: never sleep for the page lock here. */
		if (!TestSetPageLocked(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
	if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block(assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file *filp,
					struct file_ra_state *ra)
{
	/* No readahead configured: nothing to scale down. */
	if (!ra->ra_pages)
		return;

	ra->ra_pages /= 4;
}

/**
 * do_generic_mapping_read - generic file read routine
 * @mapping:	address_space to be read
 * @ra:		file's readahead state
 * @filp:	the file to read
 * @ppos:	current file position
 * @desc:	read_descriptor
 * @actor:	read method
 *
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Note the struct file* is only passed for the use of readpage.
 * It may be NULL.
 */
void do_generic_mapping_read(struct address_space *mapping,
			     struct file_ra_state *ra,
			     struct file *filp,
			     loff_t *ppos,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	struct inode *inode = mapping->host;
	pgoff_t index;
	pgoff_t last_index;
	pgoff_t prev_index;
	unsigned long offset;      /* offset into pagecache page */
	unsigned int prev_offset;
	int error;

	index = *ppos >> PAGE_CACHE_SHIFT;
	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	/* One iteration per pagecache page until desc->count is consumed. */
	for (;;) {
		struct page *page;
		pgoff_t end_index;
		loff_t isize;
		unsigned long nr, ret;

		cond_resched();
find_page:
		page = find_get_page(mapping, index);
		if (!page) {
			page_cache_sync_readahead(mapping,
					ra, filp,
					index, last_index - index);
			page = find_get_page(mapping, index);
			if (unlikely(page == NULL))
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
					ra, filp, page,
					index, last_index - index);
		}
		if (!PageUptodate(page))
			goto page_not_up_to_date;
page_ok:
		/*
		 * i_size must be checked after we know the page is Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When a sequential read accesses a page several times,
		 * only mark it as accessed the first time.
		 */
		if (prev_index != index || offset != prev_offset)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
		prev_offset = offset;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		/*
		 * NOTE(review): lock_page_killable() failure (fatal signal)
		 * is folded into -EIO via readpage_eio below, not -EINTR —
		 * confirm this is the intended reporting for this tree.
		 */
		if (lock_page_killable(page))
			goto readpage_eio;

		/* Did it get truncated before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			/* Re-take the lock to re-check under it. */
			if (lock_page_killable(page))
				goto readpage_eio;
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_inode_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				/* I/O error: back off the readahead window. */
				shrink_readahead_size_eio(filp, ra);
				goto readpage_eio;
			}
			unlock_page(page);
		}

		goto page_ok;

readpage_eio:
		error = -EIO;
readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		page = page_cache_alloc_cold(mapping);
		if (!page) {
			desc->error = -ENOMEM;
			goto out;
		}
		error = add_to_page_cache_lru(page, mapping,
						index, GFP_KERNEL);
		if (error) {
			page_cache_release(page);
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		goto readpage;
	}

out:
	/* Persist position/readahead state for the next read. */
	ra->prev_pos = prev_index;
	ra->prev_pos <<= PAGE_CACHE_SHIFT;
	ra->prev_pos |= prev_offset;

	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
	if (filp)
		file_accessed(filp);
}
EXPORT_SYMBOL(do_generic_mapping_read);

/*
 * Copy up to @size bytes from @page (starting at @offset) to the user
 * buffer in @desc.  Tries an atomic kmap copy first, falling back to the
 * sleeping kmap path on fault.  Returns the number of bytes consumed.
 */
int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/*
 * Performs necessary checks before doing a write
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @count:	number of bytes to write
 * @access_flags: type of access: %VERIFY_READ or %VERIFY_WRITE
 *
 * Adjust number of segments and amount of bytes to write (nr_segs should be
 * properly initialized first). Returns appropriate error code that caller
 * should return or zero in case that write should be allowed.
 */
int generic_segment_checks(const struct iovec *iov,
			unsigned long *nr_segs, size_t *count, int access_flags)
{
	unsigned long seg;
	size_t cnt = 0;
	for (seg = 0; seg < *nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		cnt += iv->iov_len;
		if (unlikely((ssize_t)(cnt|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(access_flags, iv->iov_base, iv->iov_len))
			continue;
		/* Inaccessible first segment: hard failure. */
		if (seg == 0)
			return -EFAULT;
		/* Later segment bad: truncate the request at it. */
		*nr_segs = seg;
		cnt -= iv->iov_len;	/* This segment is no good */
		break;
	}
	*count = cnt;
	return 0;
}
EXPORT_SYMBOL(generic_segment_checks);

/**
 * generic_file_aio_read - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iov:	io vector request
 * @nr_segs:	number of segments in the iovec
 * @pos:	current file position
 *
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	count = 0;
	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		retval = 0;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = generic_file_direct_IO(READ, iocb,
						iov, pos, nr_segs);
			if (retval > 0)
				*ppos = pos + retval;
		}
		if (likely(retval != 0)) {
			file_accessed(filp);
			goto out;
		}
		/* Fall through to buffered read on 0 (e.g. DIO not possible). */
	}

	retval = 0;
	if (count) {
		/* Buffered path: one do_generic_file_read() per iovec segment. */
		for (seg = 0; seg < nr_segs; seg++) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.arg.buf = iov[seg].iov_base;
			desc.count = iov[seg].iov_len;
			if (desc.count == 0)
				continue;
			desc.error = 0;
			do_generic_file_read(filp,ppos,&desc,file_read_actor);
			retval += desc.written;
			if (desc.error) {
				/* Report the error only if nothing was read. */
				retval = retval ?: desc.error;
				break;
			}
			/* Short read on this segment: stop early. */
			if (desc.count > 0)
				break;
		}
	}
out:
	return retval;
}
EXPORT_SYMBOL(generic_file_aio_read);

/*
 * Kick off readahead of @nr pages at @index, bounded by
 * max_sane_readahead().  Requires a working ->readpage.
 */
static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     pgoff_t index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index,
					max_sane_readahead(nr));
	return 0;
}

/* readahead(2) system call: populate the page cache for a file range. */
asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			struct address_space *mapping = file->f_mapping;
			pgoff_t start = offset >> PAGE_CACHE_SHIFT;
			/*
			 * NOTE(review): with count == 0, offset + count - 1
			 * steps back one byte (possibly a page) — confirm a
			 * zero-length readahead is harmless here.
			 */
			pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, file, start, len);
		}
		fput(file);
	}
	return ret;
}

#ifdef CONFIG_MMU
/**
 * page_cache_read - adds requested page to the page cache if not already there
 * @file:	file to read
 * @offset:	page index
 *
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int fastcall page_cache_read(struct file * file, pgoff_t offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/**
 * filemap_fault - read in file data for page fault handling
 * @vma:	vma in which the fault was taken
 * @vmf:	struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int error;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size;
	int did_readaround = 0;
	int ret = 0;

	/* Fault beyond EOF is a bus error, not a read. */
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(vma))
		goto no_cached_page;

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_lock_page(mapping, vmf->pgoff);
	/*
	 * For sequential accesses, we use the generic readahead logic.
	 */
	if (VM_SequentialReadHint(vma)) {
		if (!page) {
			page_cache_sync_readahead(mapping, ra, file,
							   vmf->pgoff, 1);
			page = find_lock_page(mapping, vmf->pgoff);
			if (!page)
				goto no_cached_page;
		}
		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping, ra, file, page,
							   vmf->pgoff, 1);
		}
	}

	if (!page) {
		unsigned long ra_pages;

		ra->mmap_miss++;

		/*
		 * Do we miss much more than hit in this file? If so,
		 * stop bothering with read-ahead. It will only hurt.
		 */
		if (ra->mmap_miss > MMAP_LOTSAMISS)
			goto no_cached_page;

		/*
		 * To keep the pgmajfault counter straight, we need to
		 * check did_readaround, as this is an inner loop.
		 */
		if (!did_readaround) {
			ret = VM_FAULT_MAJOR;
			count_vm_event(PGMAJFAULT);
		}
		did_readaround = 1;
		/* Read-around: a window of ra_pages centred on the fault. */
		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
		if (ra_pages) {
			pgoff_t start = 0;

			if (vmf->pgoff > ra_pages / 2)
				start = vmf->pgoff - ra_pages / 2;
			do_page_cache_readahead(mapping, file, start, ra_pages);
		}
		page = find_lock_page(mapping, vmf->pgoff);
		if (!page)
			goto no_cached_page;
	}

	if (!did_readaround)
		ra->mmap_miss--;

	/*
	 * We have a locked page in the page cache, now we need to check
	 * that it's up-to-date. If not, it is going to be due to an error.
	 */
	if (unlikely(!PageUptodate(page)))
		goto page_not_uptodate;

	/* Must recheck i_size under page lock */
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
	vmf->page = page;
	return ret | VM_FAULT_LOCKED;

no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, vmf->pgoff);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;

page_not_uptodate:
	/* IO error path */
	if (!did_readaround) {
		ret = VM_FAULT_MAJOR;
		count_vm_event(PGMAJFAULT);
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	page_cache_release(page);

	if (!error || error == AOP_TRUNCATED_PAGE)
		goto retry_find;

	/* Things didn't work out. Return zero to tell the mm layer so. */
	shrink_readahead_size_eio(file, ra);
	return VM_FAULT_SIGBUS;
}
EXPORT_SYMBOL(filemap_fault);

struct vm_operations_struct generic_file_vm_ops = {
	.fault = filemap_fault,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	/* mmap needs a working ->readpage to fault pages in. */
	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Refuse writable shared mappings: there is no way to write back. */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
/* !CONFIG_MMU: no pagecache mmap support at all. */
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

/*
 * Look up the page at @index; if absent, allocate it, insert it and run
 * @filler on it.  Returns the page (possibly not yet uptodate) or ERR_PTR.
 */
static struct page *__read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
		if (unlikely(err)) {
			page_cache_release(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	return page;
}

/*
 * Same as read_cache_page, but don't wait for page to become unlocked
 * after submitting it to the filler.
 */
struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data);
	if (IS_ERR(page))
		return page;
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	/* Truncated while we waited for the lock: start over. */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	/* Still not uptodate: re-run the filler on the locked page. */
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		return ERR_PTR(err);
	}
out:
	mark_page_accessed(page);
	return page;
}
EXPORT_SYMBOL(read_cache_page_async);

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	destination for read data
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page then wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page;

	page = read_cache_page_async(mapping, index, filler, data);
	if (IS_ERR(page))
		goto out;
	wait_on_page_locked(page);
	if (!PageUptodate(page)) {
		page_cache_release(page);
		page = ERR_PTR(-EIO);
	}
 out:
	return page;
}
EXPORT_SYMBOL(read_cache_page);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
	mode_t mode = dentry->d_inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	/* Privileged processes keep their setid bits across writes. */
	if (unlikely(kill && !capable(CAP_FSETID)))
		return kill;

	return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/* Strip the setid bits indicated by @kill via notify_change(). */
int __remove_suid(struct dentry *dentry, int kill)
{
	struct iattr newattrs;

	newattrs.ia_valid = ATTR_FORCE | kill;
	return notify_change(dentry, &newattrs);
}

/* Drop setid bits and security "kill-priv" attributes before a write. */
int remove_suid(struct dentry *dentry)
{
	int killsuid = should_remove_suid(dentry);
	int killpriv = security_inode_need_killpriv(dentry);
	int error = 0;

	if (killpriv < 0)
		return killpriv;
	if (killpriv)
		error = security_inode_killpriv(dentry);
	if (!error && killsuid)
		error = __remove_suid(dentry, killsuid);
	return error;
}
EXPORT_SYMBOL(remove_suid);

/*
 * Copy @bytes from the iovec array (starting @base into the first segment)
 * into @vaddr without taking page faults.  Returns bytes actually copied.
 */
static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		/* Partial copy (fault): stop and report what landed. */
		if (unlikely(left))
			break;
	}
	return copied - left;
}

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then return the number of
 * bytes which were copied.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	BUG_ON(!in_atomic());
	kaddr = kmap_atomic(page, KM_USER0);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic_nocache(kaddr + offset,
							buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr, KM_USER0);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

/*
 * This has the same sideeffects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap(page);
	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);

/* Walk the iterator's (iov, iov_offset) cursor forward by @bytes. */
static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
{
	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;

		while (bytes) {
			int copy = min(bytes, iov->iov_len - base);

			bytes -= copy;
			base += copy;
			/* Segment fully consumed: step to the next one. */
			if (iov->iov_len == base) {
				iov++;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
	}
}

/* Consume @bytes from the iterator, updating cursor and remaining count. */
void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	__iov_iter_advance_iov(i, bytes);
	i->count -= bytes;
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;

        if (unlikely(*pos < 0))
                return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
                        *pos = i_size_read(inode);

		/* Honour RLIMIT_FSIZE: SIGXFSZ + EFBIG past the limit. */
		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus frestrict idea will clean these up nicely..
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		/* Clamp the write so it ends at the filesystem maximum. */
		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
#ifdef CONFIG_BLOCK
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		/* Block devices: clamp at device size instead. */
		if (*pos + *count > isize)
			*count = isize - *pos;
#else
		return -EPERM;
#endif
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);

/*
 * Begin a buffered write at @pos for @len bytes.  Prefers the fs's
 * ->write_begin; otherwise emulates it with __grab_cache_page() +
 * ->prepare_write for legacy filesystems.
 */
int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	if (aops->write_begin) {
		return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
	} else {
		int ret;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		struct inode *inode = mapping->host;
		struct page *page;
again:
		page = __grab_cache_page(mapping, index);
		*pagep = page;
		if (!page)
			return -ENOMEM;

		if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
			/*
			 * There is no way to resolve a short write situation
			 * for a !Uptodate page (except by double copying in
			 * the caller done by generic_perform_write_2copy).
			 *
			 * Instead, we have to bring it uptodate here.
			 */
			ret = aops->readpage(file, page);
			page_cache_release(page);
			if (ret) {
				if (ret == AOP_TRUNCATED_PAGE)
					goto again;
				return ret;
			}
			goto again;
		}

		ret = aops->prepare_write(file, page, offset, offset+len);
		if (ret) {
			unlock_page(page);
			page_cache_release(page);
			/*
			 * prepare_write() may have instantiated blocks past
			 * i_size; trim them off again on failure.
			 */
			if (pos + len > inode->i_size)
				vmtruncate(inode, inode->i_size);
		}
		return ret;
	}
}
EXPORT_SYMBOL(pagecache_write_begin);

/*
 * Complete a buffered write started by pagecache_write_begin().  Uses
 * ->write_end when present, otherwise the legacy ->commit_write path.
 * Returns bytes committed, or a negative error.
 */
int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;
	int ret;

	if (aops->write_end) {
		mark_page_accessed(page);
		ret = aops->write_end(file, mapping, pos, len, copied,
							page, fsdata);
	} else {
		unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
		struct inode *inode = mapping->host;

		flush_dcache_page(page);
		ret = aops->commit_write(file, page, offset, offset+len);
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (ret < 0) {
			/* Trim blocks instantiated past i_size on failure. */
			if (pos + len > inode->i_size)
				vmtruncate(inode, inode->i_size);
		} else if (ret > 0)
			/* commit_write did a partial write */
			ret = min_t(size_t, copied, ret);
		else
			ret = copied;
	}

	return ret;
}
EXPORT_SYMBOL(pagecache_write_end);

/*
 * O_DIRECT write path: coalesce the iovecs, issue direct I/O, and extend
 * i_size if the write grew the file.  Returns bytes written or an error.
 */
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct file	*file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode	*inode = mapping->host;
	ssize_t		written;

	/* Checks shortened the request: drop iovec tail to match. */
	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
	if (written > 0) {
		loff_t end = pos + written;
		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode,  end);
			mark_inode_dirty(inode);
		}
		*ppos = end;
	}

	/*
	 * Sync the fs metadata but not the minor inode changes and
	 * of course not the data as we did direct DMA for the IO.
	 * i_mutex is held, which protects generic_osync_inode() from
	 * livelocking.
	 * AIO O_DIRECT ops attempt to sync metadata here.
	 */
	if ((written >= 0 || written == -EIOCBQUEUED) &&
	    ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		if (err < 0)
			written = err;
	}
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
{
	int status;
	struct page *page;
repeat:
	page = find_lock_page(mapping, index);
	if (likely(page))
		return page;

	page = page_cache_alloc(mapping);
	if (!page)
		return NULL;
	status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
	if (unlikely(status)) {
		page_cache_release(page);
		/* -EEXIST: raced with a concurrent insert; look it up again. */
		if (status == -EEXIST)
			goto repeat;
		return NULL;
	}
	return page;
}
EXPORT_SYMBOL(__grab_cache_page);

/*
 * Buffered-write loop for legacy prepare_write/commit_write filesystems.
 * Copies user data page by page; for !Uptodate pages it double-copies via
 * a bounce page so no page fault can be taken with the target locked.
 */
static ssize_t generic_perform_write_2copy(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *src_page;
		struct page *page;
		pgoff_t index;		/* Pagecache index for current page */
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_CACHE_SIZE - 1));
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
						iov_iter_count(i));

		/*
		 * a non-NULL src_page indicates that we're doing the
		 * copy via get_user_pages and kmap.
		 */
		src_page = NULL;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		page = __grab_cache_page(mapping, index);
		if (!page) {
			status = -ENOMEM;
			break;
		}

		/*
		 * non-uptodate pages cannot cope with short copies, and we
		 * cannot take a pagefault with the destination page locked.
		 * So pin the source page to copy it.
		 */
		if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
			unlock_page(page);

			src_page = alloc_page(GFP_KERNEL);
			if (!src_page) {
				page_cache_release(page);
				status = -ENOMEM;
				break;
			}

			/*
			 * Cannot get_user_pages with a page locked for the
			 * same reason as we can't take a page fault with a
			 * page locked (as explained below).
			 */
			copied = iov_iter_copy_from_user(src_page, i,
								offset, bytes);
			if (unlikely(copied == 0)) {
				status = -EFAULT;
				page_cache_release(page);
				page_cache_release(src_page);
				break;
			}
			bytes = copied;

			lock_page(page);
			/*
			 * Can't handle the page going uptodate here, because
			 * that means we would use non-atomic usercopies, which
			 * zero out the tail of the page, which can cause
			 * zeroes to become transiently visible. We could just
			 * use a non-zeroing copy, but the APIs aren't too
			 * consistent.
			 */
			if (unlikely(!page->mapping || PageUptodate(page))) {
				unlock_page(page);
				page_cache_release(page);
				page_cache_release(src_page);
				continue;
			}
		}

		status = a_ops->prepare_write(file, page, offset, offset+bytes);
		if (unlikely(status))
			goto fs_write_aop_error;

		if (!src_page) {
			/*
			 * Must not enter the pagefault handler here, because
			 * we hold the page lock, so we might recursively
			 * deadlock on the same lock, or get an ABBA deadlock
			 * against a different lock, or against the mmap_sem
			 * (which nests outside the page lock).  So increment
			 * preempt count, and use _atomic usercopies.
			 *
			 * The page is uptodate so we are OK to encounter a
			 * short copy: if unmodified parts of the page are
			 * marked dirty and written out to disk, it doesn't
			 * really matter.
			 */
			pagefault_disable();
			copied = iov_iter_copy_from_user_atomic(page, i,
								offset, bytes);
			pagefault_enable();
		} else {
			void *src, *dst;
			src = kmap_atomic(src_page, KM_USER0);
			dst = kmap_atomic(page, KM_USER1);
			memcpy(dst + offset, src + offset, bytes);
			kunmap_atomic(dst, KM_USER1);
			kunmap_atomic(src, KM_USER0);
			copied = bytes;
		}
		flush_dcache_page(page);

		status = a_ops->commit_write(file, page, offset, offset+bytes);
		if (unlikely(status < 0))
			goto fs_write_aop_error;
		if (unlikely(status > 0)) /* filesystem did partial write */
			copied = min_t(size_t, copied, status);

		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
		if (src_page)
			page_cache_release(src_page);

		iov_iter_advance(i, copied);
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
		continue;

fs_write_aop_error:
		unlock_page(page);
		page_cache_release(page);
		if (src_page)
			page_cache_release(src_page);

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 */
		if (pos + bytes > inode->i_size)
			vmtruncate(inode, inode->i_size);
		break;
	} while (iov_iter_count(i));

	return written ? written : status;
}

static ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
*/ if (segment_eq(get_fs(), KERNEL_DS)) flags |= AOP_FLAG_UNINTERRUPTIBLE; do { struct page *page; pgoff_t index; /* Pagecache index for current page */ unsigned long offset; /* Offset into pagecache page */ unsigned long bytes; /* Bytes to write to page */ size_t copied; /* Bytes copied from user */ void *fsdata; offset = (pos & (PAGE_CACHE_SIZE - 1)); index = pos >> PAGE_CACHE_SHIFT; bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_count(i)); again: /* * Bring in the user page that we will copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. * * Not only is this an optimisation, but it is also required * to check that the address is actually valid, when atomic * usercopies are used, below. */ if (unlikely(iov_iter_fault_in_readable(i, bytes))) { status = -EFAULT; break; } status = a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata); if (unlikely(status)) break; pagefault_disable(); copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); pagefault_enable(); flush_dcache_page(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, page, fsdata); if (unlikely(status < 0)) break; copied = status; cond_resched(); if (unlikely(copied == 0)) { /* * If we were unable to copy any data at all, we must * fall back to a single segment length write. * * If we didn't fallback here, we could livelock * because not all segments in the iov can be copied at * once without a pagefault. */ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_single_seg_count(i)); goto again; } iov_iter_advance(i, copied); pos += copied; written += copied; balance_dirty_pages_ratelimited(mapping); } while (iov_iter_count(i)); return written ? 
written : status;
}

/*
 * Buffered-write workhorse: feeds the user iovec through either the modern
 * write_begin/write_end aops (generic_perform_write) or the legacy
 * prepare_write/commit_write pair (generic_perform_write_2copy), then handles
 * O_SYNC semantics and the "O_DIRECT fell back to buffered" flush.
 *
 * Returns the number of bytes written (including the @written bytes the
 * caller already transferred), or the error if nothing was written.
 */
ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	ssize_t status;
	struct iov_iter i;

	iov_iter_init(&i, iov, nr_segs, count, written);
	/* Prefer the newer write_begin/write_end interface when the fs has it. */
	if (a_ops->write_begin)
		status = generic_perform_write(file, &i, pos);
	else
		status = generic_perform_write_2copy(file, &i, pos);

	if (likely(status >= 0)) {
		written += status;
		*ppos = pos + status;

		/*
		 * For now, when the user asks for O_SYNC, we'll actually give
		 * O_DSYNC
		 */
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
			if (!a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(inode, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
	}

	/*
	 * If we get here for O_DIRECT writes then we must have fallen through
	 * to buffered writes (block instantiation inside i_size).  So we sync
	 * the file data here, to try to honour O_DIRECT expectations.
	 */
	if (unlikely(file->f_flags & O_DIRECT) && written)
		status = filemap_write_and_wait(mapping);

	return written ?
written : status; } EXPORT_SYMBOL(generic_file_buffered_write); static ssize_t __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { struct file *file = iocb->ki_filp; struct address_space * mapping = file->f_mapping; size_t ocount; /* original count */ size_t count; /* after file limit checks */ struct inode *inode = mapping->host; loff_t pos; ssize_t written; ssize_t err; ocount = 0; err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ); if (err) return err; count = ocount; pos = *ppos; vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; written = 0; err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) goto out; if (count == 0) goto out; err = remove_suid(file->f_path.dentry); if (err) goto out; file_update_time(file); /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ if (unlikely(file->f_flags & O_DIRECT)) { loff_t endbyte; ssize_t written_buffered; written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos, count, ocount); if (written < 0 || written == count) goto out; /* * direct-io write to a hole: fall through to buffered I/O * for completing the rest of the request. */ pos += written; count -= written; written_buffered = generic_file_buffered_write(iocb, iov, nr_segs, pos, ppos, count, written); /* * If generic_file_buffered_write() retuned a synchronous error * then we want to return the number of bytes which were * direct-written, or the error code if that was zero. Note * that this differs from normal direct-io semantics, which * will return -EFOO even if some bytes were written. */ if (written_buffered < 0) { err = written_buffered; goto out; } /* * We need to ensure that the page cache pages are written to * disk and invalidated to preserve the expected O_DIRECT * semantics. 
 */
		endbyte = pos + written_buffered - written - 1;
		err = do_sync_mapping_range(file->f_mapping, pos, endbyte,
					    SYNC_FILE_RANGE_WAIT_BEFORE|
					    SYNC_FILE_RANGE_WRITE|
					    SYNC_FILE_RANGE_WAIT_AFTER);
		if (err == 0) {
			/* Sync succeeded: report the buffered tail too, and
			 * drop those pages so later O_DIRECT reads see disk. */
			written = written_buffered;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_CACHE_SHIFT,
						 endbyte >> PAGE_CACHE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		/* Normal (non-O_DIRECT) path: plain buffered write. */
		written = generic_file_buffered_write(iocb, iov, nr_segs,
				pos, ppos, count, written);
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * AIO write entry point WITHOUT taking i_mutex: the caller is responsible
 * for serialisation.  O_SYNC/IS_SYNC inodes get the written range flushed
 * afterwards via the nolock sync helper.
 */
ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	/* Caller must keep the kiocb position in sync with @pos. */
	BUG_ON(iocb->ki_pos != pos);

	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
			&iocb->ki_pos);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		/* O_SYNC write: push the pages we just dirtied to disk. */
		err = sync_page_range_nolock(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write_nolock);

/*
 * Standard AIO write entry point: takes i_mutex around the actual write,
 * then (deliberately outside the lock) syncs the written range for
 * O_SYNC/IS_SYNC inodes.
 */
ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
			&iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);

/*
 * Called under i_mutex for writes to S_ISREG files.   Returns -EIO if something
 * went wrong during pagecache shootdown.
 */
static ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb,
	const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t retval;
	size_t write_len;
	pgoff_t end = 0; /* silence gcc */

	/*
	 * If it's a write, unmap all mmappings of the file up-front. This
	 * will cause any pte dirty bits to be propagated into the pageframes
	 * for the subsequent filemap_write_and_wait().
	 */
	if (rw == WRITE) {
		write_len = iov_length(iov, nr_segs);
		end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
		if (mapping_mapped(mapping))
			unmap_mapping_range(mapping, offset, write_len, 0);
	}

	/* Flush any dirty pagecache before going direct to disk. */
	retval = filemap_write_and_wait(mapping);
	if (retval)
		goto out;

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached page from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	if (rw == WRITE && mapping->nrpages) {
		retval = invalidate_inode_pages2_range(mapping,
					offset >> PAGE_CACHE_SHIFT, end);
		if (retval)
			goto out;
	}

	retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 */
	if (rw == WRITE && mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      offset >> PAGE_CACHE_SHIFT, end);
	}
out:
	return retval;
}

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is to try to release any data against the page
 * (presumably at page->private).
If the release was successful, return `1'.
 * Otherwise return zero.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
 *
 * NOTE: @gfp_mask may go away, and this function may become non-blocking.
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	/* Caller must hold the page lock. */
	BUG_ON(!PageLocked(page));
	/* A page under writeback must keep its private data for now. */
	if (PageWriteback(page))
		return 0;

	/* Let the filesystem drop its own metadata when it provides a hook;
	 * otherwise fall back to stripping generic buffer_heads. */
	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5876_0
crossvul-cpp_data_bad_721_2
/* Copyright (C) 2007-2014 Open Information Security Foundation * * You can copy, redistribute or modify this Program under the terms of * the GNU General Public License version 2 as published by the Free * Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * version 2 along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ /** * \defgroup decode Packet decoding * * \brief Code in charge of protocol decoding * * The task of decoding packets is made in different files and * as Suricata is supporting encapsulation there is a potential * recursivity in the call. * * For each protocol a DecodePROTO function is provided. For * example we have DecodeIPV4() for IPv4 and DecodePPP() for * PPP. * * These functions have all a pkt and and a len argument which * are respectively a pointer to the protocol data and the length * of this protocol data. 
 *
 * \attention The pkt parameter must point to the effective data because
 * it will be used later to set per protocol pointer like Packet::tcph
 *
 * @{
 */

/**
 * \file
 *
 * \author Victor Julien <victor@inliniac.net>
 *
 * Decode the raw packet
 */

#include "suricata-common.h"
#include "suricata.h"
#include "conf.h"
#include "decode.h"
#include "decode-teredo.h"
#include "util-debug.h"
#include "util-mem.h"
#include "app-layer-detect-proto.h"
#include "app-layer.h"
#include "tm-threads.h"
#include "util-error.h"
#include "util-print.h"
#include "tmqh-packetpool.h"
#include "util-profiling.h"
#include "pkt-var.h"
#include "util-mpm-ac.h"
#include "output.h"
#include "output-flow.h"

/* Global switches (defined elsewhere) for per-event stats counters. */
extern bool stats_decoder_events;
extern bool stats_stream_events;

/**
 * \brief Dispatch a tunneled payload to the decoder for its inner protocol.
 *
 * \param pkt pointer to the inner (tunneled) packet data
 * \param len length of the inner packet data
 * \param proto which inner-protocol decoder to invoke
 *
 * \retval the called decoder's return code, or TM_ECODE_OK for an
 *         unsupported tunnel protocol (only logged, not treated as error)
 */
int DecodeTunnel(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p,
        uint8_t *pkt, uint32_t len, PacketQueue *pq,
        enum DecodeTunnelProto proto)
{
    switch (proto) {
        case DECODE_TUNNEL_PPP:
            return DecodePPP(tv, dtv, p, pkt, len, pq);
        case DECODE_TUNNEL_IPV4:
            return DecodeIPV4(tv, dtv, p, pkt, len, pq);
        case DECODE_TUNNEL_IPV6:
            return DecodeIPV6(tv, dtv, p, pkt, len, pq);
        case DECODE_TUNNEL_VLAN:
            return DecodeVLAN(tv, dtv, p, pkt, len, pq);
        case DECODE_TUNNEL_ETHERNET:
            return DecodeEthernet(tv, dtv, p, pkt, len, pq);
        case DECODE_TUNNEL_ERSPAN:
            return DecodeERSPAN(tv, dtv, p, pkt, len, pq);
        default:
            SCLogInfo("FIXME: DecodeTunnel: protocol %" PRIu32 " not supported.", proto);
            break;
    }
    return TM_ECODE_OK;
}

/**
 * \brief Return a malloced packet.
 *
 * Release function for packets created via PacketGetFromAlloc()
 * (i.e. packets that did not come from the packet pool).
 */
void PacketFree(Packet *p)
{
    PACKET_DESTRUCTOR(p);
    SCFree(p);
}

/**
 * \brief Finalize decoding of a packet
 *
 * This function needs to be call at the end of decode
 * functions when decoding has been successful.
 *
 */
void PacketDecodeFinalize(ThreadVars *tv, DecodeThreadVars *dtv, Packet *p)
{
    /* Count packets flagged invalid by any decoder stage. */
    if (p->flags & PKT_IS_INVALID) {
        StatsIncr(tv, dtv->counter_invalid);
    }
}

/* Bump the per-thread counter for every decoder/stream event recorded on
 * this packet, honouring the global decoder/stream stats switches. */
void PacketUpdateEngineEventCounters(ThreadVars *tv,
        DecodeThreadVars *dtv, Packet *p)
{
    for (uint8_t i = 0; i < p->events.cnt; i++) {
        const uint8_t e = p->events.events[i];

        if (e <= DECODE_EVENT_PACKET_MAX && !stats_decoder_events)
            continue;
        if (e > DECODE_EVENT_PACKET_MAX && !stats_stream_events)
            continue;
        StatsIncr(tv, dtv->counter_engine_events[e]);
    }
}

/**
 * \brief Get a malloced packet.
 *
 * Allocates and zeroes a packet outside the packet pool; its release
 * callback is set to PacketFree() and PKT_ALLOC marks the ownership.
 *
 * \retval p packet, NULL on error
 */
Packet *PacketGetFromAlloc(void)
{
    Packet *p = SCMalloc(SIZE_OF_PACKET);
    if (unlikely(p == NULL)) {
        return NULL;
    }

    memset(p, 0, SIZE_OF_PACKET);
    PACKET_INITIALIZE(p);
    p->ReleasePacket = PacketFree;
    p->flags |= PKT_ALLOC;

    SCLogDebug("allocated a new packet only using alloc...");

    PACKET_PROFILING_START(p);
    return p;
}

/**
 * \brief Return a packet to where it was allocated.
 *
 * Frees malloced packets, returns pooled packets to the pool.
 */
void PacketFreeOrRelease(Packet *p)
{
    if (p->flags & PKT_ALLOC)
        PacketFree(p);
    else
        PacketPoolReturnPacket(p);
}

/**
 * \brief Get a packet. We try to get a packet from the packetpool first, but
 *        if that is empty we alloc a packet that is free'd again after
 *        processing.
 *
 * \retval p packet, NULL on error
 */
Packet *PacketGetFromQueueOrAlloc(void)
{
    /* try the pool first */
    Packet *p = PacketPoolGetPacket();

    if (p == NULL) {
        /* non fatal, we're just not processing a packet then */
        p = PacketGetFromAlloc();
    } else {
        PACKET_PROFILING_START(p);
    }

    return p;
}

/* Lazily allocate the external (out-of-line) packet data buffer of
 * @datalen bytes; no-op if ext_pkt is already set. Returns 0 on success,
 * -1 on allocation failure (packet length is reset to 0 then). */
inline int PacketCallocExtPkt(Packet *p, int datalen)
{
    if (! p->ext_pkt) {
        p->ext_pkt = SCCalloc(1, datalen);
        if (unlikely(p->ext_pkt == NULL)) {
            SET_PKT_LEN(p, 0);
            return -1;
        }
    }
    return 0;
}

/**
 * \brief Copy data to Packet payload at given offset
 *
 * This function copies data/payload to a Packet.
It uses the * space allocated at Packet creation (pointed by Packet::pkt) * or allocate some memory (pointed by Packet::ext_pkt) if the * data size is to big to fit in initial space (of size * default_packet_size). * * \param Pointer to the Packet to modify * \param Offset of the copy relatively to payload of Packet * \param Pointer to the data to copy * \param Length of the data to copy */ inline int PacketCopyDataOffset(Packet *p, uint32_t offset, uint8_t *data, uint32_t datalen) { if (unlikely(offset + datalen > MAX_PAYLOAD_SIZE)) { /* too big */ return -1; } /* Do we have already an packet with allocated data */ if (! p->ext_pkt) { uint32_t newsize = offset + datalen; // check overflow if (newsize < offset) return -1; if (newsize <= default_packet_size) { /* data will fit in memory allocated with packet */ memcpy(GET_PKT_DIRECT_DATA(p) + offset, data, datalen); } else { /* here we need a dynamic allocation */ p->ext_pkt = SCMalloc(MAX_PAYLOAD_SIZE); if (unlikely(p->ext_pkt == NULL)) { SET_PKT_LEN(p, 0); return -1; } /* copy initial data */ memcpy(p->ext_pkt, GET_PKT_DIRECT_DATA(p), GET_PKT_DIRECT_MAX_SIZE(p)); /* copy data as asked */ memcpy(p->ext_pkt + offset, data, datalen); } } else { memcpy(p->ext_pkt + offset, data, datalen); } return 0; } /** * \brief Copy data to Packet payload and set packet length * * \param Pointer to the Packet to modify * \param Pointer to the data to copy * \param Length of the data to copy */ inline int PacketCopyData(Packet *p, uint8_t *pktdata, uint32_t pktlen) { SET_PKT_LEN(p, (size_t)pktlen); return PacketCopyDataOffset(p, 0, pktdata, pktlen); } /** * \brief Setup a pseudo packet (tunnel) * * \param parent parent packet for this pseudo pkt * \param pkt raw packet data * \param len packet data length * \param proto protocol of the tunneled packet * * \retval p the pseudo packet or NULL if out of memory */ Packet *PacketTunnelPktSetup(ThreadVars *tv, DecodeThreadVars *dtv, Packet *parent, uint8_t *pkt, uint32_t len, enum 
DecodeTunnelProto proto, PacketQueue *pq)
{
    int ret;

    SCEnter();

    /* get us a packet */
    Packet *p = PacketGetFromQueueOrAlloc();
    if (unlikely(p == NULL)) {
        SCReturnPtr(NULL, "Packet");
    }

    /* copy packet and set lenght, proto */
    PacketCopyData(p, pkt, len);
    /* pseudo packet sits one tunnel level deeper than its parent */
    p->recursion_level = parent->recursion_level + 1;
    p->ts.tv_sec = parent->ts.tv_sec;
    p->ts.tv_usec = parent->ts.tv_usec;
    p->datalink = DLT_RAW;
    p->tenant_id = parent->tenant_id;

    /* set the root ptr to the lowest layer */
    if (parent->root != NULL)
        p->root = parent->root;
    else
        p->root = parent;

    /* tell new packet it's part of a tunnel */
    SET_TUNNEL_PKT(p);

    ret = DecodeTunnel(tv, dtv, p, GET_PKT_DATA(p), GET_PKT_LEN(p),
            pq, proto);

    if (unlikely(ret != TM_ECODE_OK)) {
        /* Not a tunnel packet, just a pseudo packet */
        p->root = NULL;
        UNSET_TUNNEL_PKT(p);
        TmqhOutputPacketpool(tv, p);
        SCReturnPtr(NULL, "Packet");
    }

    /* tell parent packet it's part of a tunnel */
    SET_TUNNEL_PKT(parent);

    /* increment tunnel packet refcnt in the root packet */
    TUNNEL_INCR_PKT_TPR(p);

    /* disable payload (not packet) inspection on the parent, as the payload
     * is the packet we will now run through the system separately. We do
     * check it against the ip/port/other header checks though */
    DecodeSetNoPayloadInspectionFlag(parent);
    SCReturnPtr(p, "Packet");
}

/**
 * \brief Setup a pseudo packet (reassembled frags)
 *
 * Difference with PacketPseudoPktSetup is that this func doesn't increment
 * the recursion level. It needs to be on the same level as the frags because
 * we run the flow engine against this and we need to get the same flow.
* * \param parent parent packet for this pseudo pkt * \param pkt raw packet data * \param len packet data length * \param proto protocol of the tunneled packet * * \retval p the pseudo packet or NULL if out of memory */ Packet *PacketDefragPktSetup(Packet *parent, uint8_t *pkt, uint32_t len, uint8_t proto) { SCEnter(); /* get us a packet */ Packet *p = PacketGetFromQueueOrAlloc(); if (unlikely(p == NULL)) { SCReturnPtr(NULL, "Packet"); } /* set the root ptr to the lowest layer */ if (parent->root != NULL) p->root = parent->root; else p->root = parent; /* copy packet and set lenght, proto */ if (pkt && len) { PacketCopyData(p, pkt, len); } p->recursion_level = parent->recursion_level; /* NOT incremented */ p->ts.tv_sec = parent->ts.tv_sec; p->ts.tv_usec = parent->ts.tv_usec; p->datalink = DLT_RAW; p->tenant_id = parent->tenant_id; /* tell new packet it's part of a tunnel */ SET_TUNNEL_PKT(p); p->vlan_id[0] = parent->vlan_id[0]; p->vlan_id[1] = parent->vlan_id[1]; p->vlan_idx = parent->vlan_idx; SCReturnPtr(p, "Packet"); } /** * \brief inform defrag "parent" that a pseudo packet is * now assosiated to it. */ void PacketDefragPktSetupParent(Packet *parent) { /* tell parent packet it's part of a tunnel */ SET_TUNNEL_PKT(parent); /* increment tunnel packet refcnt in the root packet */ TUNNEL_INCR_PKT_TPR(parent); /* disable payload (not packet) inspection on the parent, as the payload * is the packet we will now run through the system separately. 
We do * check it against the ip/port/other header checks though */ DecodeSetNoPayloadInspectionFlag(parent); } void PacketBypassCallback(Packet *p) { /* Don't try to bypass if flow is already out or * if we have failed to do it once */ int state = SC_ATOMIC_GET(p->flow->flow_state); if ((state == FLOW_STATE_LOCAL_BYPASSED) || (state == FLOW_STATE_CAPTURE_BYPASSED)) { return; } if (p->BypassPacketsFlow && p->BypassPacketsFlow(p)) { FlowUpdateState(p->flow, FLOW_STATE_CAPTURE_BYPASSED); } else { FlowUpdateState(p->flow, FLOW_STATE_LOCAL_BYPASSED); } } void DecodeRegisterPerfCounters(DecodeThreadVars *dtv, ThreadVars *tv) { /* register counters */ dtv->counter_pkts = StatsRegisterCounter("decoder.pkts", tv); dtv->counter_bytes = StatsRegisterCounter("decoder.bytes", tv); dtv->counter_invalid = StatsRegisterCounter("decoder.invalid", tv); dtv->counter_ipv4 = StatsRegisterCounter("decoder.ipv4", tv); dtv->counter_ipv6 = StatsRegisterCounter("decoder.ipv6", tv); dtv->counter_eth = StatsRegisterCounter("decoder.ethernet", tv); dtv->counter_raw = StatsRegisterCounter("decoder.raw", tv); dtv->counter_null = StatsRegisterCounter("decoder.null", tv); dtv->counter_sll = StatsRegisterCounter("decoder.sll", tv); dtv->counter_tcp = StatsRegisterCounter("decoder.tcp", tv); dtv->counter_udp = StatsRegisterCounter("decoder.udp", tv); dtv->counter_sctp = StatsRegisterCounter("decoder.sctp", tv); dtv->counter_icmpv4 = StatsRegisterCounter("decoder.icmpv4", tv); dtv->counter_icmpv6 = StatsRegisterCounter("decoder.icmpv6", tv); dtv->counter_ppp = StatsRegisterCounter("decoder.ppp", tv); dtv->counter_pppoe = StatsRegisterCounter("decoder.pppoe", tv); dtv->counter_gre = StatsRegisterCounter("decoder.gre", tv); dtv->counter_vlan = StatsRegisterCounter("decoder.vlan", tv); dtv->counter_vlan_qinq = StatsRegisterCounter("decoder.vlan_qinq", tv); dtv->counter_ieee8021ah = StatsRegisterCounter("decoder.ieee8021ah", tv); dtv->counter_teredo = StatsRegisterCounter("decoder.teredo", tv); 
dtv->counter_ipv4inipv6 = StatsRegisterCounter("decoder.ipv4_in_ipv6", tv); dtv->counter_ipv6inipv6 = StatsRegisterCounter("decoder.ipv6_in_ipv6", tv); dtv->counter_mpls = StatsRegisterCounter("decoder.mpls", tv); dtv->counter_avg_pkt_size = StatsRegisterAvgCounter("decoder.avg_pkt_size", tv); dtv->counter_max_pkt_size = StatsRegisterMaxCounter("decoder.max_pkt_size", tv); dtv->counter_erspan = StatsRegisterMaxCounter("decoder.erspan", tv); dtv->counter_flow_memcap = StatsRegisterCounter("flow.memcap", tv); dtv->counter_flow_tcp = StatsRegisterCounter("flow.tcp", tv); dtv->counter_flow_udp = StatsRegisterCounter("flow.udp", tv); dtv->counter_flow_icmp4 = StatsRegisterCounter("flow.icmpv4", tv); dtv->counter_flow_icmp6 = StatsRegisterCounter("flow.icmpv6", tv); dtv->counter_defrag_ipv4_fragments = StatsRegisterCounter("defrag.ipv4.fragments", tv); dtv->counter_defrag_ipv4_reassembled = StatsRegisterCounter("defrag.ipv4.reassembled", tv); dtv->counter_defrag_ipv4_timeouts = StatsRegisterCounter("defrag.ipv4.timeouts", tv); dtv->counter_defrag_ipv6_fragments = StatsRegisterCounter("defrag.ipv6.fragments", tv); dtv->counter_defrag_ipv6_reassembled = StatsRegisterCounter("defrag.ipv6.reassembled", tv); dtv->counter_defrag_ipv6_timeouts = StatsRegisterCounter("defrag.ipv6.timeouts", tv); dtv->counter_defrag_max_hit = StatsRegisterCounter("defrag.max_frag_hits", tv); for (int i = 0; i < DECODE_EVENT_MAX; i++) { BUG_ON(i != (int)DEvents[i].code); if (i <= DECODE_EVENT_PACKET_MAX && !stats_decoder_events) continue; if (i > DECODE_EVENT_PACKET_MAX && !stats_stream_events) continue; dtv->counter_engine_events[i] = StatsRegisterCounter( DEvents[i].event_name, tv); } return; } void DecodeUpdatePacketCounters(ThreadVars *tv, const DecodeThreadVars *dtv, const Packet *p) { StatsIncr(tv, dtv->counter_pkts); //StatsIncr(tv, dtv->counter_pkts_per_sec); StatsAddUI64(tv, dtv->counter_bytes, GET_PKT_LEN(p)); StatsAddUI64(tv, dtv->counter_avg_pkt_size, GET_PKT_LEN(p)); StatsSetUI64(tv, 
dtv->counter_max_pkt_size, GET_PKT_LEN(p)); } /** * \brief Debug print function for printing addresses * * \param Address object * * \todo IPv6 */ void AddressDebugPrint(Address *a) { if (a == NULL) return; switch (a->family) { case AF_INET: { char s[16]; PrintInet(AF_INET, (const void *)&a->addr_data32[0], s, sizeof(s)); SCLogDebug("%s", s); break; } } } /** \brief Alloc and setup DecodeThreadVars */ DecodeThreadVars *DecodeThreadVarsAlloc(ThreadVars *tv) { DecodeThreadVars *dtv = NULL; if ( (dtv = SCMalloc(sizeof(DecodeThreadVars))) == NULL) return NULL; memset(dtv, 0, sizeof(DecodeThreadVars)); dtv->app_tctx = AppLayerGetCtxThread(tv); if (OutputFlowLogThreadInit(tv, NULL, &dtv->output_flow_thread_data) != TM_ECODE_OK) { SCLogError(SC_ERR_THREAD_INIT, "initializing flow log API for thread failed"); DecodeThreadVarsFree(tv, dtv); return NULL; } /** set config defaults */ int vlanbool = 0; if ((ConfGetBool("vlan.use-for-tracking", &vlanbool)) == 1 && vlanbool == 0) { dtv->vlan_disabled = 1; } SCLogDebug("vlan tracking is %s", dtv->vlan_disabled == 0 ? 
"enabled" : "disabled"); return dtv; } void DecodeThreadVarsFree(ThreadVars *tv, DecodeThreadVars *dtv) { if (dtv != NULL) { if (dtv->app_tctx != NULL) AppLayerDestroyCtxThread(dtv->app_tctx); if (dtv->output_flow_thread_data != NULL) OutputFlowLogThreadDeinit(tv, dtv->output_flow_thread_data); SCFree(dtv); } } /** * \brief Set data for Packet and set length when zeo copy is used * * \param Pointer to the Packet to modify * \param Pointer to the data * \param Length of the data */ inline int PacketSetData(Packet *p, uint8_t *pktdata, uint32_t pktlen) { SET_PKT_LEN(p, (size_t)pktlen); if (unlikely(!pktdata)) { return -1; } p->ext_pkt = pktdata; p->flags |= PKT_ZERO_COPY; return 0; } const char *PktSrcToString(enum PktSrcEnum pkt_src) { const char *pkt_src_str = "<unknown>"; switch (pkt_src) { case PKT_SRC_WIRE: pkt_src_str = "wire/pcap"; break; case PKT_SRC_DECODER_GRE: pkt_src_str = "gre tunnel"; break; case PKT_SRC_DECODER_IPV4: pkt_src_str = "ipv4 tunnel"; break; case PKT_SRC_DECODER_IPV6: pkt_src_str = "ipv6 tunnel"; break; case PKT_SRC_DECODER_TEREDO: pkt_src_str = "teredo tunnel"; break; case PKT_SRC_DEFRAG: pkt_src_str = "defrag"; break; case PKT_SRC_STREAM_TCP_STREAM_END_PSEUDO: pkt_src_str = "stream"; break; case PKT_SRC_STREAM_TCP_DETECTLOG_FLUSH: pkt_src_str = "stream (detect/log)"; break; case PKT_SRC_FFR: pkt_src_str = "stream (flow timeout)"; break; } return pkt_src_str; } void CaptureStatsUpdate(ThreadVars *tv, CaptureStats *s, const Packet *p) { if (unlikely(PACKET_TEST_ACTION(p, (ACTION_REJECT|ACTION_REJECT_DST|ACTION_REJECT_BOTH)))) { StatsIncr(tv, s->counter_ips_rejected); } else if (unlikely(PACKET_TEST_ACTION(p, ACTION_DROP))) { StatsIncr(tv, s->counter_ips_blocked); } else if (unlikely(p->flags & PKT_STREAM_MODIFIED)) { StatsIncr(tv, s->counter_ips_replaced); } else { StatsIncr(tv, s->counter_ips_accepted); } } void CaptureStatsSetup(ThreadVars *tv, CaptureStats *s) { s->counter_ips_accepted = StatsRegisterCounter("ips.accepted", tv); 
s->counter_ips_blocked = StatsRegisterCounter("ips.blocked", tv); s->counter_ips_rejected = StatsRegisterCounter("ips.rejected", tv); s->counter_ips_replaced = StatsRegisterCounter("ips.replaced", tv); } void DecodeGlobalConfig(void) { DecodeTeredoConfig(); } /** * @} */
./CrossVul/dataset_final_sorted/CWE-20/c/bad_721_2
crossvul-cpp_data_bad_364_3
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP W W PPPP % % P P W W P P % % PPPP W W PPPP % % P W W W P % % P W W P % % % % % % Read Seattle Film Works Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/constitute.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/resource_.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/module.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P W P % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPWP() returns MagickTrue if the image format type, identified by the % magick string, is PWP. 
%
%  The format of the IsPWP method is:
%
%      MagickBooleanType IsPWP(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPWP(const unsigned char *magick,const size_t length)
{
  /* A PWP file starts with the 5-byte signature "SFW95". */
  if (length < 5)
    return(MagickFalse);
  if (LocaleNCompare((char *) magick,"SFW95",5) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d P W P I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPWPImage() reads a Seattle Film Works multi-image file and returns
%  it.  It allocates the memory necessary for the new Image structure and
%  returns a pointer to the new image.
%
%  The format of the ReadPWPImage method is:
%
%      Image *ReadPWPImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadPWPImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  FILE
    *file;

  Image
    *image,
    *next_image,
    *pwp_image;

  ImageInfo
    *read_info;

  int
    c,
    unique_file;

  MagickBooleanType
    status;

  register Image
    *p;

  register ssize_t
    i;

  size_t
    filesize,
    length;

  ssize_t
    count;

  unsigned char
    magick[MagickPathExtent];

  /*
    Open image file.
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return((Image *) NULL); } pwp_image=image; memset(magick,0,sizeof(magick)); count=ReadBlob(pwp_image,5,magick); if ((count != 5) || (LocaleNCompare((char *) magick,"SFW95",5) != 0)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); read_info=CloneImageInfo(image_info); (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL, (void *) NULL); SetImageInfoBlob(read_info,(void *) NULL,0); unique_file=AcquireUniqueFileResource(filename); (void) FormatLocaleString(read_info->filename,MagickPathExtent,"sfw:%s", filename); for ( ; ; ) { (void) memset(magick,0,sizeof(magick)); for (c=ReadBlobByte(pwp_image); c != EOF; c=ReadBlobByte(pwp_image)) { for (i=0; i < 17; i++) magick[i]=magick[i+1]; magick[17]=(unsigned char) c; if (LocaleNCompare((char *) (magick+12),"SFW94A",6) == 0) break; } if (c == EOF) { (void) RelinquishUniqueFileResource(filename); read_info=DestroyImageInfo(read_info); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (LocaleNCompare((char *) (magick+12),"SFW94A",6) != 0) { (void) RelinquishUniqueFileResource(filename); read_info=DestroyImageInfo(read_info); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } /* Dump SFW image to a temporary file. 
*/ file=(FILE *) NULL; if (unique_file != -1) file=fdopen(unique_file,"wb"); if ((unique_file == -1) || (file == (FILE *) NULL)) { (void) RelinquishUniqueFileResource(filename); read_info=DestroyImageInfo(read_info); ThrowFileException(exception,FileOpenError,"UnableToWriteFile", image->filename); image=DestroyImageList(image); return((Image *) NULL); } length=fwrite("SFW94A",1,6,file); (void) length; filesize=65535UL*magick[2]+256L*magick[1]+magick[0]; for (i=0; i < (ssize_t) filesize; i++) { c=ReadBlobByte(pwp_image); if (c == EOF) break; (void) fputc(c,file); } (void) fclose(file); if (c == EOF) { (void) RelinquishUniqueFileResource(filename); read_info=DestroyImageInfo(read_info); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } next_image=ReadImage(read_info,exception); if (next_image == (Image *) NULL) break; (void) FormatLocaleString(next_image->filename,MagickPathExtent, "slide_%02ld.sfw",(long) next_image->scene); if (image == (Image *) NULL) image=next_image; else { /* Link image into image list. 
*/ for (p=image; p->next != (Image *) NULL; p=GetNextImageInList(p)) ; next_image->previous=p; next_image->scene=p->scene+1; p->next=next_image; } if (image_info->number_scenes != 0) if (next_image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageProgress(image,LoadImagesTag,TellBlob(pwp_image), GetBlobSize(pwp_image)); if (status == MagickFalse) break; } if (unique_file != -1) (void) close(unique_file); (void) RelinquishUniqueFileResource(filename); read_info=DestroyImageInfo(read_info); if (image != (Image *) NULL) { if (EOFBlob(image) != MagickFalse) { char *message; message=GetExceptionMessage(errno); (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageError,"UnexpectedEndOfFile","`%s': %s",image->filename, message); message=DestroyString(message); } (void) CloseBlob(image); } return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P W P I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPWPImage() adds attributes for the PWP image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
%
%  The format of the RegisterPWPImage method is:
%
%      size_t RegisterPWPImage(void)
%
*/
ModuleExport size_t RegisterPWPImage(void)
{
  MagickInfo
    *pwp_entry;

  /*
    Describe the PWP (Seattle Film Works) format.  Only a magick-byte probe
    and a decoder are wired in — no encoder is registered, so the format is
    read-only as far as this coder is concerned.
  */
  pwp_entry=AcquireMagickInfo("PWP","PWP","Seattle Film Works");
  pwp_entry->magick=(IsImageFormatHandler *) IsPWP;
  pwp_entry->decoder=(DecodeImageHandler *) ReadPWPImage;
  (void) RegisterMagickInfo(pwp_entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P W P I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPWPImage() removes format registrations made by the
%  PWP module from the list of supported formats.
%
%  The format of the UnregisterPWPImage method is:
%
%      UnregisterPWPImage(void)
%
*/
ModuleExport void UnregisterPWPImage(void)
{
  /*
    Drop the registration installed by RegisterPWPImage().
  */
  (void) UnregisterMagickInfo("PWP");
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_364_3
crossvul-cpp_data_bad_364_1
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD CCCC M M % % D D C MM MM % % D D C M M M % % D D C M M % % DDDD CCCC M M % % % % % % Read DICOM Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" /* Dicom medical image declarations. 
*/ typedef struct _DicomInfo { const unsigned short group, element; const char *vr, *description; } DicomInfo; static const DicomInfo dicom_info[] = { { 0x0000, 0x0000, "UL", "Group Length" }, { 0x0000, 0x0001, "UL", "Command Length to End" }, { 0x0000, 0x0002, "UI", "Affected SOP Class UID" }, { 0x0000, 0x0003, "UI", "Requested SOP Class UID" }, { 0x0000, 0x0010, "LO", "Command Recognition Code" }, { 0x0000, 0x0100, "US", "Command Field" }, { 0x0000, 0x0110, "US", "Message ID" }, { 0x0000, 0x0120, "US", "Message ID Being Responded To" }, { 0x0000, 0x0200, "AE", "Initiator" }, { 0x0000, 0x0300, "AE", "Receiver" }, { 0x0000, 0x0400, "AE", "Find Location" }, { 0x0000, 0x0600, "AE", "Move Destination" }, { 0x0000, 0x0700, "US", "Priority" }, { 0x0000, 0x0800, "US", "Data Set Type" }, { 0x0000, 0x0850, "US", "Number of Matches" }, { 0x0000, 0x0860, "US", "Response Sequence Number" }, { 0x0000, 0x0900, "US", "Status" }, { 0x0000, 0x0901, "AT", "Offending Element" }, { 0x0000, 0x0902, "LO", "Exception Comment" }, { 0x0000, 0x0903, "US", "Exception ID" }, { 0x0000, 0x1000, "UI", "Affected SOP Instance UID" }, { 0x0000, 0x1001, "UI", "Requested SOP Instance UID" }, { 0x0000, 0x1002, "US", "Event Type ID" }, { 0x0000, 0x1005, "AT", "Attribute Identifier List" }, { 0x0000, 0x1008, "US", "Action Type ID" }, { 0x0000, 0x1020, "US", "Number of Remaining Suboperations" }, { 0x0000, 0x1021, "US", "Number of Completed Suboperations" }, { 0x0000, 0x1022, "US", "Number of Failed Suboperations" }, { 0x0000, 0x1023, "US", "Number of Warning Suboperations" }, { 0x0000, 0x1030, "AE", "Move Originator Application Entity Title" }, { 0x0000, 0x1031, "US", "Move Originator Message ID" }, { 0x0000, 0x4000, "LO", "Dialog Receiver" }, { 0x0000, 0x4010, "LO", "Terminal Type" }, { 0x0000, 0x5010, "SH", "Message Set ID" }, { 0x0000, 0x5020, "SH", "End Message Set" }, { 0x0000, 0x5110, "LO", "Display Format" }, { 0x0000, 0x5120, "LO", "Page Position ID" }, { 0x0000, 0x5130, "LO", "Text Format ID" 
}, { 0x0000, 0x5140, "LO", "Normal Reverse" }, { 0x0000, 0x5150, "LO", "Add Gray Scale" }, { 0x0000, 0x5160, "LO", "Borders" }, { 0x0000, 0x5170, "IS", "Copies" }, { 0x0000, 0x5180, "LO", "OldMagnificationType" }, { 0x0000, 0x5190, "LO", "Erase" }, { 0x0000, 0x51a0, "LO", "Print" }, { 0x0000, 0x51b0, "US", "Overlays" }, { 0x0002, 0x0000, "UL", "Meta Element Group Length" }, { 0x0002, 0x0001, "OB", "File Meta Information Version" }, { 0x0002, 0x0002, "UI", "Media Storage SOP Class UID" }, { 0x0002, 0x0003, "UI", "Media Storage SOP Instance UID" }, { 0x0002, 0x0010, "UI", "Transfer Syntax UID" }, { 0x0002, 0x0012, "UI", "Implementation Class UID" }, { 0x0002, 0x0013, "SH", "Implementation Version Name" }, { 0x0002, 0x0016, "AE", "Source Application Entity Title" }, { 0x0002, 0x0100, "UI", "Private Information Creator UID" }, { 0x0002, 0x0102, "OB", "Private Information" }, { 0x0003, 0x0000, "US", "?" }, { 0x0003, 0x0008, "US", "ISI Command Field" }, { 0x0003, 0x0011, "US", "Attach ID Application Code" }, { 0x0003, 0x0012, "UL", "Attach ID Message Count" }, { 0x0003, 0x0013, "DA", "Attach ID Date" }, { 0x0003, 0x0014, "TM", "Attach ID Time" }, { 0x0003, 0x0020, "US", "Message Type" }, { 0x0003, 0x0030, "DA", "Max Waiting Date" }, { 0x0003, 0x0031, "TM", "Max Waiting Time" }, { 0x0004, 0x0000, "UL", "File Set Group Length" }, { 0x0004, 0x1130, "CS", "File Set ID" }, { 0x0004, 0x1141, "CS", "File Set Descriptor File ID" }, { 0x0004, 0x1142, "CS", "File Set Descriptor File Specific Character Set" }, { 0x0004, 0x1200, "UL", "Root Directory Entity First Directory Record Offset" }, { 0x0004, 0x1202, "UL", "Root Directory Entity Last Directory Record Offset" }, { 0x0004, 0x1212, "US", "File Set Consistency Flag" }, { 0x0004, 0x1220, "SQ", "Directory Record Sequence" }, { 0x0004, 0x1400, "UL", "Next Directory Record Offset" }, { 0x0004, 0x1410, "US", "Record In Use Flag" }, { 0x0004, 0x1420, "UL", "Referenced Lower Level Directory Entity Offset" }, { 0x0004, 0x1430, "CS", 
"Directory Record Type" }, { 0x0004, 0x1432, "UI", "Private Record UID" }, { 0x0004, 0x1500, "CS", "Referenced File ID" }, { 0x0004, 0x1504, "UL", "MRDR Directory Record Offset" }, { 0x0004, 0x1510, "UI", "Referenced SOP Class UID In File" }, { 0x0004, 0x1511, "UI", "Referenced SOP Instance UID In File" }, { 0x0004, 0x1512, "UI", "Referenced Transfer Syntax UID In File" }, { 0x0004, 0x1600, "UL", "Number of References" }, { 0x0005, 0x0000, "US", "?" }, { 0x0006, 0x0000, "US", "?" }, { 0x0008, 0x0000, "UL", "Identifying Group Length" }, { 0x0008, 0x0001, "UL", "Length to End" }, { 0x0008, 0x0005, "CS", "Specific Character Set" }, { 0x0008, 0x0008, "CS", "Image Type" }, { 0x0008, 0x0010, "LO", "Recognition Code" }, { 0x0008, 0x0012, "DA", "Instance Creation Date" }, { 0x0008, 0x0013, "TM", "Instance Creation Time" }, { 0x0008, 0x0014, "UI", "Instance Creator UID" }, { 0x0008, 0x0016, "UI", "SOP Class UID" }, { 0x0008, 0x0018, "UI", "SOP Instance UID" }, { 0x0008, 0x0020, "DA", "Study Date" }, { 0x0008, 0x0021, "DA", "Series Date" }, { 0x0008, 0x0022, "DA", "Acquisition Date" }, { 0x0008, 0x0023, "DA", "Image Date" }, { 0x0008, 0x0024, "DA", "Overlay Date" }, { 0x0008, 0x0025, "DA", "Curve Date" }, { 0x0008, 0x002A, "DT", "Acquisition DateTime" }, { 0x0008, 0x0030, "TM", "Study Time" }, { 0x0008, 0x0031, "TM", "Series Time" }, { 0x0008, 0x0032, "TM", "Acquisition Time" }, { 0x0008, 0x0033, "TM", "Image Time" }, { 0x0008, 0x0034, "TM", "Overlay Time" }, { 0x0008, 0x0035, "TM", "Curve Time" }, { 0x0008, 0x0040, "xs", "Old Data Set Type" }, { 0x0008, 0x0041, "xs", "Old Data Set Subtype" }, { 0x0008, 0x0042, "CS", "Nuclear Medicine Series Type" }, { 0x0008, 0x0050, "SH", "Accession Number" }, { 0x0008, 0x0052, "CS", "Query/Retrieve Level" }, { 0x0008, 0x0054, "AE", "Retrieve AE Title" }, { 0x0008, 0x0058, "UI", "Failed SOP Instance UID List" }, { 0x0008, 0x0060, "CS", "Modality" }, { 0x0008, 0x0062, "SQ", "Modality Subtype" }, { 0x0008, 0x0064, "CS", "Conversion Type" }, 
{ 0x0008, 0x0068, "CS", "Presentation Intent Type" }, { 0x0008, 0x0070, "LO", "Manufacturer" }, { 0x0008, 0x0080, "LO", "Institution Name" }, { 0x0008, 0x0081, "ST", "Institution Address" }, { 0x0008, 0x0082, "SQ", "Institution Code Sequence" }, { 0x0008, 0x0090, "PN", "Referring Physician's Name" }, { 0x0008, 0x0092, "ST", "Referring Physician's Address" }, { 0x0008, 0x0094, "SH", "Referring Physician's Telephone Numbers" }, { 0x0008, 0x0100, "SH", "Code Value" }, { 0x0008, 0x0102, "SH", "Coding Scheme Designator" }, { 0x0008, 0x0103, "SH", "Coding Scheme Version" }, { 0x0008, 0x0104, "LO", "Code Meaning" }, { 0x0008, 0x0105, "CS", "Mapping Resource" }, { 0x0008, 0x0106, "DT", "Context Group Version" }, { 0x0008, 0x010b, "CS", "Code Set Extension Flag" }, { 0x0008, 0x010c, "UI", "Private Coding Scheme Creator UID" }, { 0x0008, 0x010d, "UI", "Code Set Extension Creator UID" }, { 0x0008, 0x010f, "CS", "Context Identifier" }, { 0x0008, 0x1000, "LT", "Network ID" }, { 0x0008, 0x1010, "SH", "Station Name" }, { 0x0008, 0x1030, "LO", "Study Description" }, { 0x0008, 0x1032, "SQ", "Procedure Code Sequence" }, { 0x0008, 0x103e, "LO", "Series Description" }, { 0x0008, 0x1040, "LO", "Institutional Department Name" }, { 0x0008, 0x1048, "PN", "Physician of Record" }, { 0x0008, 0x1050, "PN", "Performing Physician's Name" }, { 0x0008, 0x1060, "PN", "Name of Physician(s) Reading Study" }, { 0x0008, 0x1070, "PN", "Operator's Name" }, { 0x0008, 0x1080, "LO", "Admitting Diagnosis Description" }, { 0x0008, 0x1084, "SQ", "Admitting Diagnosis Code Sequence" }, { 0x0008, 0x1090, "LO", "Manufacturer's Model Name" }, { 0x0008, 0x1100, "SQ", "Referenced Results Sequence" }, { 0x0008, 0x1110, "SQ", "Referenced Study Sequence" }, { 0x0008, 0x1111, "SQ", "Referenced Study Component Sequence" }, { 0x0008, 0x1115, "SQ", "Referenced Series Sequence" }, { 0x0008, 0x1120, "SQ", "Referenced Patient Sequence" }, { 0x0008, 0x1125, "SQ", "Referenced Visit Sequence" }, { 0x0008, 0x1130, "SQ", 
"Referenced Overlay Sequence" }, { 0x0008, 0x1140, "SQ", "Referenced Image Sequence" }, { 0x0008, 0x1145, "SQ", "Referenced Curve Sequence" }, { 0x0008, 0x1148, "SQ", "Referenced Previous Waveform" }, { 0x0008, 0x114a, "SQ", "Referenced Simultaneous Waveforms" }, { 0x0008, 0x114c, "SQ", "Referenced Subsequent Waveform" }, { 0x0008, 0x1150, "UI", "Referenced SOP Class UID" }, { 0x0008, 0x1155, "UI", "Referenced SOP Instance UID" }, { 0x0008, 0x1160, "IS", "Referenced Frame Number" }, { 0x0008, 0x1195, "UI", "Transaction UID" }, { 0x0008, 0x1197, "US", "Failure Reason" }, { 0x0008, 0x1198, "SQ", "Failed SOP Sequence" }, { 0x0008, 0x1199, "SQ", "Referenced SOP Sequence" }, { 0x0008, 0x2110, "CS", "Old Lossy Image Compression" }, { 0x0008, 0x2111, "ST", "Derivation Description" }, { 0x0008, 0x2112, "SQ", "Source Image Sequence" }, { 0x0008, 0x2120, "SH", "Stage Name" }, { 0x0008, 0x2122, "IS", "Stage Number" }, { 0x0008, 0x2124, "IS", "Number of Stages" }, { 0x0008, 0x2128, "IS", "View Number" }, { 0x0008, 0x2129, "IS", "Number of Event Timers" }, { 0x0008, 0x212a, "IS", "Number of Views in Stage" }, { 0x0008, 0x2130, "DS", "Event Elapsed Time(s)" }, { 0x0008, 0x2132, "LO", "Event Timer Name(s)" }, { 0x0008, 0x2142, "IS", "Start Trim" }, { 0x0008, 0x2143, "IS", "Stop Trim" }, { 0x0008, 0x2144, "IS", "Recommended Display Frame Rate" }, { 0x0008, 0x2200, "CS", "Transducer Position" }, { 0x0008, 0x2204, "CS", "Transducer Orientation" }, { 0x0008, 0x2208, "CS", "Anatomic Structure" }, { 0x0008, 0x2218, "SQ", "Anatomic Region Sequence" }, { 0x0008, 0x2220, "SQ", "Anatomic Region Modifier Sequence" }, { 0x0008, 0x2228, "SQ", "Primary Anatomic Structure Sequence" }, { 0x0008, 0x2230, "SQ", "Primary Anatomic Structure Modifier Sequence" }, { 0x0008, 0x2240, "SQ", "Transducer Position Sequence" }, { 0x0008, 0x2242, "SQ", "Transducer Position Modifier Sequence" }, { 0x0008, 0x2244, "SQ", "Transducer Orientation Sequence" }, { 0x0008, 0x2246, "SQ", "Transducer Orientation 
Modifier Sequence" }, { 0x0008, 0x2251, "SQ", "Anatomic Structure Space Or Region Code Sequence" }, { 0x0008, 0x2253, "SQ", "Anatomic Portal Of Entrance Code Sequence" }, { 0x0008, 0x2255, "SQ", "Anatomic Approach Direction Code Sequence" }, { 0x0008, 0x2256, "ST", "Anatomic Perspective Description" }, { 0x0008, 0x2257, "SQ", "Anatomic Perspective Code Sequence" }, { 0x0008, 0x2258, "ST", "Anatomic Location Of Examining Instrument Description" }, { 0x0008, 0x2259, "SQ", "Anatomic Location Of Examining Instrument Code Sequence" }, { 0x0008, 0x225a, "SQ", "Anatomic Structure Space Or Region Modifier Code Sequence" }, { 0x0008, 0x225c, "SQ", "OnAxis Background Anatomic Structure Code Sequence" }, { 0x0008, 0x4000, "LT", "Identifying Comments" }, { 0x0009, 0x0000, "xs", "?" }, { 0x0009, 0x0001, "xs", "?" }, { 0x0009, 0x0002, "xs", "?" }, { 0x0009, 0x0003, "xs", "?" }, { 0x0009, 0x0004, "xs", "?" }, { 0x0009, 0x0005, "UN", "?" }, { 0x0009, 0x0006, "UN", "?" }, { 0x0009, 0x0007, "UN", "?" }, { 0x0009, 0x0008, "xs", "?" }, { 0x0009, 0x0009, "LT", "?" }, { 0x0009, 0x000a, "IS", "?" }, { 0x0009, 0x000b, "IS", "?" }, { 0x0009, 0x000c, "IS", "?" }, { 0x0009, 0x000d, "IS", "?" }, { 0x0009, 0x000e, "IS", "?" }, { 0x0009, 0x000f, "UN", "?" }, { 0x0009, 0x0010, "xs", "?" }, { 0x0009, 0x0011, "xs", "?" }, { 0x0009, 0x0012, "xs", "?" }, { 0x0009, 0x0013, "xs", "?" }, { 0x0009, 0x0014, "xs", "?" }, { 0x0009, 0x0015, "xs", "?" }, { 0x0009, 0x0016, "xs", "?" }, { 0x0009, 0x0017, "LT", "?" }, { 0x0009, 0x0018, "LT", "Data Set Identifier" }, { 0x0009, 0x001a, "US", "?" }, { 0x0009, 0x001e, "UI", "?" }, { 0x0009, 0x0020, "xs", "?" }, { 0x0009, 0x0021, "xs", "?" }, { 0x0009, 0x0022, "SH", "User Orientation" }, { 0x0009, 0x0023, "SL", "Initiation Type" }, { 0x0009, 0x0024, "xs", "?" }, { 0x0009, 0x0025, "xs", "?" }, { 0x0009, 0x0026, "xs", "?" }, { 0x0009, 0x0027, "xs", "?" }, { 0x0009, 0x0029, "xs", "?" }, { 0x0009, 0x002a, "SL", "?" 
}, { 0x0009, 0x002c, "LO", "Series Comments" }, { 0x0009, 0x002d, "SL", "Track Beat Average" }, { 0x0009, 0x002e, "FD", "Distance Prescribed" }, { 0x0009, 0x002f, "LT", "?" }, { 0x0009, 0x0030, "xs", "?" }, { 0x0009, 0x0031, "xs", "?" }, { 0x0009, 0x0032, "LT", "?" }, { 0x0009, 0x0034, "xs", "?" }, { 0x0009, 0x0035, "SL", "Gantry Locus Type" }, { 0x0009, 0x0037, "SL", "Starting Heart Rate" }, { 0x0009, 0x0038, "xs", "?" }, { 0x0009, 0x0039, "SL", "RR Window Offset" }, { 0x0009, 0x003a, "SL", "Percent Cycle Imaged" }, { 0x0009, 0x003e, "US", "?" }, { 0x0009, 0x003f, "US", "?" }, { 0x0009, 0x0040, "xs", "?" }, { 0x0009, 0x0041, "xs", "?" }, { 0x0009, 0x0042, "xs", "?" }, { 0x0009, 0x0043, "xs", "?" }, { 0x0009, 0x0050, "LT", "?" }, { 0x0009, 0x0051, "xs", "?" }, { 0x0009, 0x0060, "LT", "?" }, { 0x0009, 0x0061, "LT", "Series Unique Identifier" }, { 0x0009, 0x0070, "LT", "?" }, { 0x0009, 0x0080, "LT", "?" }, { 0x0009, 0x0091, "LT", "?" }, { 0x0009, 0x00e2, "LT", "?" }, { 0x0009, 0x00e3, "UI", "Equipment UID" }, { 0x0009, 0x00e6, "SH", "Genesis Version Now" }, { 0x0009, 0x00e7, "UL", "Exam Record Checksum" }, { 0x0009, 0x00e8, "UL", "?" }, { 0x0009, 0x00e9, "SL", "Actual Series Data Time Stamp" }, { 0x0009, 0x00f2, "UN", "?" }, { 0x0009, 0x00f3, "UN", "?" }, { 0x0009, 0x00f4, "LT", "?" }, { 0x0009, 0x00f5, "xs", "?" }, { 0x0009, 0x00f6, "LT", "PDM Data Object Type Extension" }, { 0x0009, 0x00f8, "US", "?" }, { 0x0009, 0x00fb, "IS", "?" }, { 0x0009, 0x1002, "OB", "?" }, { 0x0009, 0x1003, "OB", "?" }, { 0x0009, 0x1010, "UN", "?" 
}, { 0x0010, 0x0000, "UL", "Patient Group Length" }, { 0x0010, 0x0010, "PN", "Patient's Name" }, { 0x0010, 0x0020, "LO", "Patient's ID" }, { 0x0010, 0x0021, "LO", "Issuer of Patient's ID" }, { 0x0010, 0x0030, "DA", "Patient's Birth Date" }, { 0x0010, 0x0032, "TM", "Patient's Birth Time" }, { 0x0010, 0x0040, "CS", "Patient's Sex" }, { 0x0010, 0x0050, "SQ", "Patient's Insurance Plan Code Sequence" }, { 0x0010, 0x1000, "LO", "Other Patient's ID's" }, { 0x0010, 0x1001, "PN", "Other Patient's Names" }, { 0x0010, 0x1005, "PN", "Patient's Birth Name" }, { 0x0010, 0x1010, "AS", "Patient's Age" }, { 0x0010, 0x1020, "DS", "Patient's Size" }, { 0x0010, 0x1030, "DS", "Patient's Weight" }, { 0x0010, 0x1040, "LO", "Patient's Address" }, { 0x0010, 0x1050, "LT", "Insurance Plan Identification" }, { 0x0010, 0x1060, "PN", "Patient's Mother's Birth Name" }, { 0x0010, 0x1080, "LO", "Military Rank" }, { 0x0010, 0x1081, "LO", "Branch of Service" }, { 0x0010, 0x1090, "LO", "Medical Record Locator" }, { 0x0010, 0x2000, "LO", "Medical Alerts" }, { 0x0010, 0x2110, "LO", "Contrast Allergies" }, { 0x0010, 0x2150, "LO", "Country of Residence" }, { 0x0010, 0x2152, "LO", "Region of Residence" }, { 0x0010, 0x2154, "SH", "Patients Telephone Numbers" }, { 0x0010, 0x2160, "SH", "Ethnic Group" }, { 0x0010, 0x2180, "SH", "Occupation" }, { 0x0010, 0x21a0, "CS", "Smoking Status" }, { 0x0010, 0x21b0, "LT", "Additional Patient History" }, { 0x0010, 0x21c0, "US", "Pregnancy Status" }, { 0x0010, 0x21d0, "DA", "Last Menstrual Date" }, { 0x0010, 0x21f0, "LO", "Patients Religious Preference" }, { 0x0010, 0x4000, "LT", "Patient Comments" }, { 0x0011, 0x0001, "xs", "?" }, { 0x0011, 0x0002, "US", "?" }, { 0x0011, 0x0003, "LT", "Patient UID" }, { 0x0011, 0x0004, "LT", "Patient ID" }, { 0x0011, 0x000a, "xs", "?" }, { 0x0011, 0x000b, "SL", "Effective Series Duration" }, { 0x0011, 0x000c, "SL", "Num Beats" }, { 0x0011, 0x000d, "LO", "Radio Nuclide Name" }, { 0x0011, 0x0010, "xs", "?" }, { 0x0011, 0x0011, "xs", "?" 
}, { 0x0011, 0x0012, "LO", "Dataset Name" }, { 0x0011, 0x0013, "LO", "Dataset Type" }, { 0x0011, 0x0015, "xs", "?" }, { 0x0011, 0x0016, "SL", "Energy Number" }, { 0x0011, 0x0017, "SL", "RR Interval Window Number" }, { 0x0011, 0x0018, "SL", "MG Bin Number" }, { 0x0011, 0x0019, "FD", "Radius Of Rotation" }, { 0x0011, 0x001a, "SL", "Detector Count Zone" }, { 0x0011, 0x001b, "SL", "Num Energy Windows" }, { 0x0011, 0x001c, "SL", "Energy Offset" }, { 0x0011, 0x001d, "SL", "Energy Range" }, { 0x0011, 0x001f, "SL", "Image Orientation" }, { 0x0011, 0x0020, "xs", "?" }, { 0x0011, 0x0021, "xs", "?" }, { 0x0011, 0x0022, "xs", "?" }, { 0x0011, 0x0023, "xs", "?" }, { 0x0011, 0x0024, "SL", "FOV Mask Y Cutoff Angle" }, { 0x0011, 0x0025, "xs", "?" }, { 0x0011, 0x0026, "SL", "Table Orientation" }, { 0x0011, 0x0027, "SL", "ROI Top Left" }, { 0x0011, 0x0028, "SL", "ROI Bottom Right" }, { 0x0011, 0x0030, "xs", "?" }, { 0x0011, 0x0031, "xs", "?" }, { 0x0011, 0x0032, "UN", "?" }, { 0x0011, 0x0033, "LO", "Energy Correct Name" }, { 0x0011, 0x0034, "LO", "Spatial Correct Name" }, { 0x0011, 0x0035, "xs", "?" }, { 0x0011, 0x0036, "LO", "Uniformity Correct Name" }, { 0x0011, 0x0037, "LO", "Acquisition Specific Correct Name" }, { 0x0011, 0x0038, "SL", "Byte Order" }, { 0x0011, 0x003a, "SL", "Picture Format" }, { 0x0011, 0x003b, "FD", "Pixel Scale" }, { 0x0011, 0x003c, "FD", "Pixel Offset" }, { 0x0011, 0x003e, "SL", "FOV Shape" }, { 0x0011, 0x003f, "SL", "Dataset Flags" }, { 0x0011, 0x0040, "xs", "?" 
}, { 0x0011, 0x0041, "LT", "Medical Alerts" }, { 0x0011, 0x0042, "LT", "Contrast Allergies" }, { 0x0011, 0x0044, "FD", "Threshold Center" }, { 0x0011, 0x0045, "FD", "Threshold Width" }, { 0x0011, 0x0046, "SL", "Interpolation Type" }, { 0x0011, 0x0055, "FD", "Period" }, { 0x0011, 0x0056, "FD", "ElapsedTime" }, { 0x0011, 0x00a1, "DA", "Patient Registration Date" }, { 0x0011, 0x00a2, "TM", "Patient Registration Time" }, { 0x0011, 0x00b0, "LT", "Patient Last Name" }, { 0x0011, 0x00b2, "LT", "Patient First Name" }, { 0x0011, 0x00b4, "LT", "Patient Hospital Status" }, { 0x0011, 0x00bc, "TM", "Current Location Time" }, { 0x0011, 0x00c0, "LT", "Patient Insurance Status" }, { 0x0011, 0x00d0, "LT", "Patient Billing Type" }, { 0x0011, 0x00d2, "LT", "Patient Billing Address" }, { 0x0013, 0x0000, "LT", "Modifying Physician" }, { 0x0013, 0x0010, "xs", "?" }, { 0x0013, 0x0011, "SL", "?" }, { 0x0013, 0x0012, "xs", "?" }, { 0x0013, 0x0016, "SL", "AutoTrack Peak" }, { 0x0013, 0x0017, "SL", "AutoTrack Width" }, { 0x0013, 0x0018, "FD", "Transmission Scan Time" }, { 0x0013, 0x0019, "FD", "Transmission Mask Width" }, { 0x0013, 0x001a, "FD", "Copper Attenuator Thickness" }, { 0x0013, 0x001c, "FD", "?" }, { 0x0013, 0x001d, "FD", "?" 
}, { 0x0013, 0x001e, "FD", "Tomo View Offset" }, { 0x0013, 0x0020, "LT", "Patient Name" }, { 0x0013, 0x0022, "LT", "Patient Id" }, { 0x0013, 0x0026, "LT", "Study Comments" }, { 0x0013, 0x0030, "DA", "Patient Birthdate" }, { 0x0013, 0x0031, "DS", "Patient Weight" }, { 0x0013, 0x0032, "LT", "Patients Maiden Name" }, { 0x0013, 0x0033, "LT", "Referring Physician" }, { 0x0013, 0x0034, "LT", "Admitting Diagnosis" }, { 0x0013, 0x0035, "LT", "Patient Sex" }, { 0x0013, 0x0040, "LT", "Procedure Description" }, { 0x0013, 0x0042, "LT", "Patient Rest Direction" }, { 0x0013, 0x0044, "LT", "Patient Position" }, { 0x0013, 0x0046, "LT", "View Direction" }, { 0x0015, 0x0001, "DS", "Stenosis Calibration Ratio" }, { 0x0015, 0x0002, "DS", "Stenosis Magnification" }, { 0x0015, 0x0003, "DS", "Cardiac Calibration Ratio" }, { 0x0018, 0x0000, "UL", "Acquisition Group Length" }, { 0x0018, 0x0010, "LO", "Contrast/Bolus Agent" }, { 0x0018, 0x0012, "SQ", "Contrast/Bolus Agent Sequence" }, { 0x0018, 0x0014, "SQ", "Contrast/Bolus Administration Route Sequence" }, { 0x0018, 0x0015, "CS", "Body Part Examined" }, { 0x0018, 0x0020, "CS", "Scanning Sequence" }, { 0x0018, 0x0021, "CS", "Sequence Variant" }, { 0x0018, 0x0022, "CS", "Scan Options" }, { 0x0018, 0x0023, "CS", "MR Acquisition Type" }, { 0x0018, 0x0024, "SH", "Sequence Name" }, { 0x0018, 0x0025, "CS", "Angio Flag" }, { 0x0018, 0x0026, "SQ", "Intervention Drug Information Sequence" }, { 0x0018, 0x0027, "TM", "Intervention Drug Stop Time" }, { 0x0018, 0x0028, "DS", "Intervention Drug Dose" }, { 0x0018, 0x0029, "SQ", "Intervention Drug Code Sequence" }, { 0x0018, 0x002a, "SQ", "Additional Drug Sequence" }, { 0x0018, 0x0030, "LO", "Radionuclide" }, { 0x0018, 0x0031, "LO", "Radiopharmaceutical" }, { 0x0018, 0x0032, "DS", "Energy Window Centerline" }, { 0x0018, 0x0033, "DS", "Energy Window Total Width" }, { 0x0018, 0x0034, "LO", "Intervention Drug Name" }, { 0x0018, 0x0035, "TM", "Intervention Drug Start Time" }, { 0x0018, 0x0036, "SQ", 
"Intervention Therapy Sequence" }, { 0x0018, 0x0037, "CS", "Therapy Type" }, { 0x0018, 0x0038, "CS", "Intervention Status" }, { 0x0018, 0x0039, "CS", "Therapy Description" }, { 0x0018, 0x0040, "IS", "Cine Rate" }, { 0x0018, 0x0050, "DS", "Slice Thickness" }, { 0x0018, 0x0060, "DS", "KVP" }, { 0x0018, 0x0070, "IS", "Counts Accumulated" }, { 0x0018, 0x0071, "CS", "Acquisition Termination Condition" }, { 0x0018, 0x0072, "DS", "Effective Series Duration" }, { 0x0018, 0x0073, "CS", "Acquisition Start Condition" }, { 0x0018, 0x0074, "IS", "Acquisition Start Condition Data" }, { 0x0018, 0x0075, "IS", "Acquisition Termination Condition Data" }, { 0x0018, 0x0080, "DS", "Repetition Time" }, { 0x0018, 0x0081, "DS", "Echo Time" }, { 0x0018, 0x0082, "DS", "Inversion Time" }, { 0x0018, 0x0083, "DS", "Number of Averages" }, { 0x0018, 0x0084, "DS", "Imaging Frequency" }, { 0x0018, 0x0085, "SH", "Imaged Nucleus" }, { 0x0018, 0x0086, "IS", "Echo Number(s)" }, { 0x0018, 0x0087, "DS", "Magnetic Field Strength" }, { 0x0018, 0x0088, "DS", "Spacing Between Slices" }, { 0x0018, 0x0089, "IS", "Number of Phase Encoding Steps" }, { 0x0018, 0x0090, "DS", "Data Collection Diameter" }, { 0x0018, 0x0091, "IS", "Echo Train Length" }, { 0x0018, 0x0093, "DS", "Percent Sampling" }, { 0x0018, 0x0094, "DS", "Percent Phase Field of View" }, { 0x0018, 0x0095, "DS", "Pixel Bandwidth" }, { 0x0018, 0x1000, "LO", "Device Serial Number" }, { 0x0018, 0x1004, "LO", "Plate ID" }, { 0x0018, 0x1010, "LO", "Secondary Capture Device ID" }, { 0x0018, 0x1012, "DA", "Date of Secondary Capture" }, { 0x0018, 0x1014, "TM", "Time of Secondary Capture" }, { 0x0018, 0x1016, "LO", "Secondary Capture Device Manufacturer" }, { 0x0018, 0x1018, "LO", "Secondary Capture Device Manufacturer Model Name" }, { 0x0018, 0x1019, "LO", "Secondary Capture Device Software Version(s)" }, { 0x0018, 0x1020, "LO", "Software Version(s)" }, { 0x0018, 0x1022, "SH", "Video Image Format Acquired" }, { 0x0018, 0x1023, "LO", "Digital Image Format 
Acquired" }, { 0x0018, 0x1030, "LO", "Protocol Name" }, { 0x0018, 0x1040, "LO", "Contrast/Bolus Route" }, { 0x0018, 0x1041, "DS", "Contrast/Bolus Volume" }, { 0x0018, 0x1042, "TM", "Contrast/Bolus Start Time" }, { 0x0018, 0x1043, "TM", "Contrast/Bolus Stop Time" }, { 0x0018, 0x1044, "DS", "Contrast/Bolus Total Dose" }, { 0x0018, 0x1045, "IS", "Syringe Counts" }, { 0x0018, 0x1046, "DS", "Contrast Flow Rate" }, { 0x0018, 0x1047, "DS", "Contrast Flow Duration" }, { 0x0018, 0x1048, "CS", "Contrast/Bolus Ingredient" }, { 0x0018, 0x1049, "DS", "Contrast/Bolus Ingredient Concentration" }, { 0x0018, 0x1050, "DS", "Spatial Resolution" }, { 0x0018, 0x1060, "DS", "Trigger Time" }, { 0x0018, 0x1061, "LO", "Trigger Source or Type" }, { 0x0018, 0x1062, "IS", "Nominal Interval" }, { 0x0018, 0x1063, "DS", "Frame Time" }, { 0x0018, 0x1064, "LO", "Framing Type" }, { 0x0018, 0x1065, "DS", "Frame Time Vector" }, { 0x0018, 0x1066, "DS", "Frame Delay" }, { 0x0018, 0x1067, "DS", "Image Trigger Delay" }, { 0x0018, 0x1068, "DS", "Group Time Offset" }, { 0x0018, 0x1069, "DS", "Trigger Time Offset" }, { 0x0018, 0x106a, "CS", "Synchronization Trigger" }, { 0x0018, 0x106b, "UI", "Synchronization Frame of Reference" }, { 0x0018, 0x106e, "UL", "Trigger Sample Position" }, { 0x0018, 0x1070, "LO", "Radiopharmaceutical Route" }, { 0x0018, 0x1071, "DS", "Radiopharmaceutical Volume" }, { 0x0018, 0x1072, "TM", "Radiopharmaceutical Start Time" }, { 0x0018, 0x1073, "TM", "Radiopharmaceutical Stop Time" }, { 0x0018, 0x1074, "DS", "Radionuclide Total Dose" }, { 0x0018, 0x1075, "DS", "Radionuclide Half Life" }, { 0x0018, 0x1076, "DS", "Radionuclide Positron Fraction" }, { 0x0018, 0x1077, "DS", "Radiopharmaceutical Specific Activity" }, { 0x0018, 0x1080, "CS", "Beat Rejection Flag" }, { 0x0018, 0x1081, "IS", "Low R-R Value" }, { 0x0018, 0x1082, "IS", "High R-R Value" }, { 0x0018, 0x1083, "IS", "Intervals Acquired" }, { 0x0018, 0x1084, "IS", "Intervals Rejected" }, { 0x0018, 0x1085, "LO", "PVC Rejection" }, 
{ 0x0018, 0x1086, "IS", "Skip Beats" }, { 0x0018, 0x1088, "IS", "Heart Rate" }, { 0x0018, 0x1090, "IS", "Cardiac Number of Images" }, { 0x0018, 0x1094, "IS", "Trigger Window" }, { 0x0018, 0x1100, "DS", "Reconstruction Diameter" }, { 0x0018, 0x1110, "DS", "Distance Source to Detector" }, { 0x0018, 0x1111, "DS", "Distance Source to Patient" }, { 0x0018, 0x1114, "DS", "Estimated Radiographic Magnification Factor" }, { 0x0018, 0x1120, "DS", "Gantry/Detector Tilt" }, { 0x0018, 0x1121, "DS", "Gantry/Detector Slew" }, { 0x0018, 0x1130, "DS", "Table Height" }, { 0x0018, 0x1131, "DS", "Table Traverse" }, { 0x0018, 0x1134, "CS", "Table Motion" }, { 0x0018, 0x1135, "DS", "Table Vertical Increment" }, { 0x0018, 0x1136, "DS", "Table Lateral Increment" }, { 0x0018, 0x1137, "DS", "Table Longitudinal Increment" }, { 0x0018, 0x1138, "DS", "Table Angle" }, { 0x0018, 0x113a, "CS", "Table Type" }, { 0x0018, 0x1140, "CS", "Rotation Direction" }, { 0x0018, 0x1141, "DS", "Angular Position" }, { 0x0018, 0x1142, "DS", "Radial Position" }, { 0x0018, 0x1143, "DS", "Scan Arc" }, { 0x0018, 0x1144, "DS", "Angular Step" }, { 0x0018, 0x1145, "DS", "Center of Rotation Offset" }, { 0x0018, 0x1146, "DS", "Rotation Offset" }, { 0x0018, 0x1147, "CS", "Field of View Shape" }, { 0x0018, 0x1149, "IS", "Field of View Dimension(s)" }, { 0x0018, 0x1150, "IS", "Exposure Time" }, { 0x0018, 0x1151, "IS", "X-ray Tube Current" }, { 0x0018, 0x1152, "IS", "Exposure" }, { 0x0018, 0x1153, "IS", "Exposure in uAs" }, { 0x0018, 0x1154, "DS", "AveragePulseWidth" }, { 0x0018, 0x1155, "CS", "RadiationSetting" }, { 0x0018, 0x1156, "CS", "Rectification Type" }, { 0x0018, 0x115a, "CS", "RadiationMode" }, { 0x0018, 0x115e, "DS", "ImageAreaDoseProduct" }, { 0x0018, 0x1160, "SH", "Filter Type" }, { 0x0018, 0x1161, "LO", "TypeOfFilters" }, { 0x0018, 0x1162, "DS", "IntensifierSize" }, { 0x0018, 0x1164, "DS", "ImagerPixelSpacing" }, { 0x0018, 0x1166, "CS", "Grid" }, { 0x0018, 0x1170, "IS", "Generator Power" }, { 0x0018, 0x1180, 
"SH", "Collimator/Grid Name" }, { 0x0018, 0x1181, "CS", "Collimator Type" }, { 0x0018, 0x1182, "IS", "Focal Distance" }, { 0x0018, 0x1183, "DS", "X Focus Center" }, { 0x0018, 0x1184, "DS", "Y Focus Center" }, { 0x0018, 0x1190, "DS", "Focal Spot(s)" }, { 0x0018, 0x1191, "CS", "Anode Target Material" }, { 0x0018, 0x11a0, "DS", "Body Part Thickness" }, { 0x0018, 0x11a2, "DS", "Compression Force" }, { 0x0018, 0x1200, "DA", "Date of Last Calibration" }, { 0x0018, 0x1201, "TM", "Time of Last Calibration" }, { 0x0018, 0x1210, "SH", "Convolution Kernel" }, { 0x0018, 0x1240, "IS", "Upper/Lower Pixel Values" }, { 0x0018, 0x1242, "IS", "Actual Frame Duration" }, { 0x0018, 0x1243, "IS", "Count Rate" }, { 0x0018, 0x1244, "US", "Preferred Playback Sequencing" }, { 0x0018, 0x1250, "SH", "Receiving Coil" }, { 0x0018, 0x1251, "SH", "Transmitting Coil" }, { 0x0018, 0x1260, "SH", "Plate Type" }, { 0x0018, 0x1261, "LO", "Phosphor Type" }, { 0x0018, 0x1300, "DS", "Scan Velocity" }, { 0x0018, 0x1301, "CS", "Whole Body Technique" }, { 0x0018, 0x1302, "IS", "Scan Length" }, { 0x0018, 0x1310, "US", "Acquisition Matrix" }, { 0x0018, 0x1312, "CS", "Phase Encoding Direction" }, { 0x0018, 0x1314, "DS", "Flip Angle" }, { 0x0018, 0x1315, "CS", "Variable Flip Angle Flag" }, { 0x0018, 0x1316, "DS", "SAR" }, { 0x0018, 0x1318, "DS", "dB/dt" }, { 0x0018, 0x1400, "LO", "Acquisition Device Processing Description" }, { 0x0018, 0x1401, "LO", "Acquisition Device Processing Code" }, { 0x0018, 0x1402, "CS", "Cassette Orientation" }, { 0x0018, 0x1403, "CS", "Cassette Size" }, { 0x0018, 0x1404, "US", "Exposures on Plate" }, { 0x0018, 0x1405, "IS", "Relative X-ray Exposure" }, { 0x0018, 0x1450, "DS", "Column Angulation" }, { 0x0018, 0x1460, "DS", "Tomo Layer Height" }, { 0x0018, 0x1470, "DS", "Tomo Angle" }, { 0x0018, 0x1480, "DS", "Tomo Time" }, { 0x0018, 0x1490, "CS", "Tomo Type" }, { 0x0018, 0x1491, "CS", "Tomo Class" }, { 0x0018, 0x1495, "IS", "Number of Tomosynthesis Source Images" }, { 0x0018, 0x1500, 
"CS", "PositionerMotion" }, { 0x0018, 0x1508, "CS", "Positioner Type" }, { 0x0018, 0x1510, "DS", "PositionerPrimaryAngle" }, { 0x0018, 0x1511, "DS", "PositionerSecondaryAngle" }, { 0x0018, 0x1520, "DS", "PositionerPrimaryAngleIncrement" }, { 0x0018, 0x1521, "DS", "PositionerSecondaryAngleIncrement" }, { 0x0018, 0x1530, "DS", "DetectorPrimaryAngle" }, { 0x0018, 0x1531, "DS", "DetectorSecondaryAngle" }, { 0x0018, 0x1600, "CS", "Shutter Shape" }, { 0x0018, 0x1602, "IS", "Shutter Left Vertical Edge" }, { 0x0018, 0x1604, "IS", "Shutter Right Vertical Edge" }, { 0x0018, 0x1606, "IS", "Shutter Upper Horizontal Edge" }, { 0x0018, 0x1608, "IS", "Shutter Lower Horizonta lEdge" }, { 0x0018, 0x1610, "IS", "Center of Circular Shutter" }, { 0x0018, 0x1612, "IS", "Radius of Circular Shutter" }, { 0x0018, 0x1620, "IS", "Vertices of Polygonal Shutter" }, { 0x0018, 0x1622, "US", "Shutter Presentation Value" }, { 0x0018, 0x1623, "US", "Shutter Overlay Group" }, { 0x0018, 0x1700, "CS", "Collimator Shape" }, { 0x0018, 0x1702, "IS", "Collimator Left Vertical Edge" }, { 0x0018, 0x1704, "IS", "Collimator Right Vertical Edge" }, { 0x0018, 0x1706, "IS", "Collimator Upper Horizontal Edge" }, { 0x0018, 0x1708, "IS", "Collimator Lower Horizontal Edge" }, { 0x0018, 0x1710, "IS", "Center of Circular Collimator" }, { 0x0018, 0x1712, "IS", "Radius of Circular Collimator" }, { 0x0018, 0x1720, "IS", "Vertices of Polygonal Collimator" }, { 0x0018, 0x1800, "CS", "Acquisition Time Synchronized" }, { 0x0018, 0x1801, "SH", "Time Source" }, { 0x0018, 0x1802, "CS", "Time Distribution Protocol" }, { 0x0018, 0x4000, "LT", "Acquisition Comments" }, { 0x0018, 0x5000, "SH", "Output Power" }, { 0x0018, 0x5010, "LO", "Transducer Data" }, { 0x0018, 0x5012, "DS", "Focus Depth" }, { 0x0018, 0x5020, "LO", "Processing Function" }, { 0x0018, 0x5021, "LO", "Postprocessing Function" }, { 0x0018, 0x5022, "DS", "Mechanical Index" }, { 0x0018, 0x5024, "DS", "Thermal Index" }, { 0x0018, 0x5026, "DS", "Cranial Thermal Index" 
}, { 0x0018, 0x5027, "DS", "Soft Tissue Thermal Index" }, { 0x0018, 0x5028, "DS", "Soft Tissue-Focus Thermal Index" }, { 0x0018, 0x5029, "DS", "Soft Tissue-Surface Thermal Index" }, { 0x0018, 0x5030, "DS", "Dynamic Range" }, { 0x0018, 0x5040, "DS", "Total Gain" }, { 0x0018, 0x5050, "IS", "Depth of Scan Field" }, { 0x0018, 0x5100, "CS", "Patient Position" }, { 0x0018, 0x5101, "CS", "View Position" }, { 0x0018, 0x5104, "SQ", "Projection Eponymous Name Code Sequence" }, { 0x0018, 0x5210, "DS", "Image Transformation Matrix" }, { 0x0018, 0x5212, "DS", "Image Translation Vector" }, { 0x0018, 0x6000, "DS", "Sensitivity" }, { 0x0018, 0x6011, "IS", "Sequence of Ultrasound Regions" }, { 0x0018, 0x6012, "US", "Region Spatial Format" }, { 0x0018, 0x6014, "US", "Region Data Type" }, { 0x0018, 0x6016, "UL", "Region Flags" }, { 0x0018, 0x6018, "UL", "Region Location Min X0" }, { 0x0018, 0x601a, "UL", "Region Location Min Y0" }, { 0x0018, 0x601c, "UL", "Region Location Max X1" }, { 0x0018, 0x601e, "UL", "Region Location Max Y1" }, { 0x0018, 0x6020, "SL", "Reference Pixel X0" }, { 0x0018, 0x6022, "SL", "Reference Pixel Y0" }, { 0x0018, 0x6024, "US", "Physical Units X Direction" }, { 0x0018, 0x6026, "US", "Physical Units Y Direction" }, { 0x0018, 0x6028, "FD", "Reference Pixel Physical Value X" }, { 0x0018, 0x602a, "US", "Reference Pixel Physical Value Y" }, { 0x0018, 0x602c, "US", "Physical Delta X" }, { 0x0018, 0x602e, "US", "Physical Delta Y" }, { 0x0018, 0x6030, "UL", "Transducer Frequency" }, { 0x0018, 0x6031, "CS", "Transducer Type" }, { 0x0018, 0x6032, "UL", "Pulse Repetition Frequency" }, { 0x0018, 0x6034, "FD", "Doppler Correction Angle" }, { 0x0018, 0x6036, "FD", "Steering Angle" }, { 0x0018, 0x6038, "UL", "Doppler Sample Volume X Position" }, { 0x0018, 0x603a, "UL", "Doppler Sample Volume Y Position" }, { 0x0018, 0x603c, "UL", "TM-Line Position X0" }, { 0x0018, 0x603e, "UL", "TM-Line Position Y0" }, { 0x0018, 0x6040, "UL", "TM-Line Position X1" }, { 0x0018, 0x6042, "UL", 
"TM-Line Position Y1" }, { 0x0018, 0x6044, "US", "Pixel Component Organization" }, { 0x0018, 0x6046, "UL", "Pixel Component Mask" }, { 0x0018, 0x6048, "UL", "Pixel Component Range Start" }, { 0x0018, 0x604a, "UL", "Pixel Component Range Stop" }, { 0x0018, 0x604c, "US", "Pixel Component Physical Units" }, { 0x0018, 0x604e, "US", "Pixel Component Data Type" }, { 0x0018, 0x6050, "UL", "Number of Table Break Points" }, { 0x0018, 0x6052, "UL", "Table of X Break Points" }, { 0x0018, 0x6054, "FD", "Table of Y Break Points" }, { 0x0018, 0x6056, "UL", "Number of Table Entries" }, { 0x0018, 0x6058, "UL", "Table of Pixel Values" }, { 0x0018, 0x605a, "FL", "Table of Parameter Values" }, { 0x0018, 0x7000, "CS", "Detector Conditions Nominal Flag" }, { 0x0018, 0x7001, "DS", "Detector Temperature" }, { 0x0018, 0x7004, "CS", "Detector Type" }, { 0x0018, 0x7005, "CS", "Detector Configuration" }, { 0x0018, 0x7006, "LT", "Detector Description" }, { 0x0018, 0x7008, "LT", "Detector Mode" }, { 0x0018, 0x700a, "SH", "Detector ID" }, { 0x0018, 0x700c, "DA", "Date of Last Detector Calibration " }, { 0x0018, 0x700e, "TM", "Time of Last Detector Calibration" }, { 0x0018, 0x7010, "IS", "Exposures on Detector Since Last Calibration" }, { 0x0018, 0x7011, "IS", "Exposures on Detector Since Manufactured" }, { 0x0018, 0x7012, "DS", "Detector Time Since Last Exposure" }, { 0x0018, 0x7014, "DS", "Detector Active Time" }, { 0x0018, 0x7016, "DS", "Detector Activation Offset From Exposure" }, { 0x0018, 0x701a, "DS", "Detector Binning" }, { 0x0018, 0x7020, "DS", "Detector Element Physical Size" }, { 0x0018, 0x7022, "DS", "Detector Element Spacing" }, { 0x0018, 0x7024, "CS", "Detector Active Shape" }, { 0x0018, 0x7026, "DS", "Detector Active Dimensions" }, { 0x0018, 0x7028, "DS", "Detector Active Origin" }, { 0x0018, 0x7030, "DS", "Field of View Origin" }, { 0x0018, 0x7032, "DS", "Field of View Rotation" }, { 0x0018, 0x7034, "CS", "Field of View Horizontal Flip" }, { 0x0018, 0x7040, "LT", "Grid Absorbing 
Material" }, { 0x0018, 0x7041, "LT", "Grid Spacing Material" }, { 0x0018, 0x7042, "DS", "Grid Thickness" }, { 0x0018, 0x7044, "DS", "Grid Pitch" }, { 0x0018, 0x7046, "IS", "Grid Aspect Ratio" }, { 0x0018, 0x7048, "DS", "Grid Period" }, { 0x0018, 0x704c, "DS", "Grid Focal Distance" }, { 0x0018, 0x7050, "LT", "Filter Material" }, { 0x0018, 0x7052, "DS", "Filter Thickness Minimum" }, { 0x0018, 0x7054, "DS", "Filter Thickness Maximum" }, { 0x0018, 0x7060, "CS", "Exposure Control Mode" }, { 0x0018, 0x7062, "LT", "Exposure Control Mode Description" }, { 0x0018, 0x7064, "CS", "Exposure Status" }, { 0x0018, 0x7065, "DS", "Phototimer Setting" }, { 0x0019, 0x0000, "xs", "?" }, { 0x0019, 0x0001, "xs", "?" }, { 0x0019, 0x0002, "xs", "?" }, { 0x0019, 0x0003, "xs", "?" }, { 0x0019, 0x0004, "xs", "?" }, { 0x0019, 0x0005, "xs", "?" }, { 0x0019, 0x0006, "xs", "?" }, { 0x0019, 0x0007, "xs", "?" }, { 0x0019, 0x0008, "xs", "?" }, { 0x0019, 0x0009, "xs", "?" }, { 0x0019, 0x000a, "xs", "?" }, { 0x0019, 0x000b, "DS", "?" }, { 0x0019, 0x000c, "US", "?" }, { 0x0019, 0x000d, "TM", "Time" }, { 0x0019, 0x000e, "xs", "?" }, { 0x0019, 0x000f, "DS", "Horizontal Frame Of Reference" }, { 0x0019, 0x0010, "xs", "?" }, { 0x0019, 0x0011, "xs", "?" }, { 0x0019, 0x0012, "xs", "?" }, { 0x0019, 0x0013, "xs", "?" }, { 0x0019, 0x0014, "xs", "?" }, { 0x0019, 0x0015, "xs", "?" }, { 0x0019, 0x0016, "xs", "?" }, { 0x0019, 0x0017, "xs", "?" }, { 0x0019, 0x0018, "xs", "?" }, { 0x0019, 0x0019, "xs", "?" }, { 0x0019, 0x001a, "xs", "?" }, { 0x0019, 0x001b, "xs", "?" }, { 0x0019, 0x001c, "CS", "Dose" }, { 0x0019, 0x001d, "IS", "Side Mark" }, { 0x0019, 0x001e, "xs", "?" }, { 0x0019, 0x001f, "DS", "Exposure Duration" }, { 0x0019, 0x0020, "xs", "?" }, { 0x0019, 0x0021, "xs", "?" }, { 0x0019, 0x0022, "xs", "?" }, { 0x0019, 0x0023, "xs", "?" }, { 0x0019, 0x0024, "xs", "?" }, { 0x0019, 0x0025, "xs", "?" }, { 0x0019, 0x0026, "xs", "?" }, { 0x0019, 0x0027, "xs", "?" }, { 0x0019, 0x0028, "xs", "?" 
}, { 0x0019, 0x0029, "IS", "?" }, { 0x0019, 0x002a, "xs", "?" }, { 0x0019, 0x002b, "DS", "Xray Off Position" }, { 0x0019, 0x002c, "xs", "?" }, { 0x0019, 0x002d, "US", "?" }, { 0x0019, 0x002e, "xs", "?" }, { 0x0019, 0x002f, "DS", "Trigger Frequency" }, { 0x0019, 0x0030, "xs", "?" }, { 0x0019, 0x0031, "xs", "?" }, { 0x0019, 0x0032, "xs", "?" }, { 0x0019, 0x0033, "UN", "ECG 2 Offset 2" }, { 0x0019, 0x0034, "US", "?" }, { 0x0019, 0x0036, "US", "?" }, { 0x0019, 0x0038, "US", "?" }, { 0x0019, 0x0039, "xs", "?" }, { 0x0019, 0x003a, "xs", "?" }, { 0x0019, 0x003b, "LT", "?" }, { 0x0019, 0x003c, "xs", "?" }, { 0x0019, 0x003e, "xs", "?" }, { 0x0019, 0x003f, "UN", "?" }, { 0x0019, 0x0040, "xs", "?" }, { 0x0019, 0x0041, "xs", "?" }, { 0x0019, 0x0042, "xs", "?" }, { 0x0019, 0x0043, "xs", "?" }, { 0x0019, 0x0044, "xs", "?" }, { 0x0019, 0x0045, "xs", "?" }, { 0x0019, 0x0046, "xs", "?" }, { 0x0019, 0x0047, "xs", "?" }, { 0x0019, 0x0048, "xs", "?" }, { 0x0019, 0x0049, "US", "?" }, { 0x0019, 0x004a, "xs", "?" }, { 0x0019, 0x004b, "SL", "Data Size For Scan Data" }, { 0x0019, 0x004c, "US", "?" }, { 0x0019, 0x004e, "US", "?" }, { 0x0019, 0x0050, "xs", "?" }, { 0x0019, 0x0051, "xs", "?" }, { 0x0019, 0x0052, "xs", "?" }, { 0x0019, 0x0053, "LT", "Barcode" }, { 0x0019, 0x0054, "xs", "?" }, { 0x0019, 0x0055, "DS", "Receiver Reference Gain" }, { 0x0019, 0x0056, "xs", "?" }, { 0x0019, 0x0057, "SS", "CT Water Number" }, { 0x0019, 0x0058, "xs", "?" }, { 0x0019, 0x005a, "xs", "?" }, { 0x0019, 0x005c, "xs", "?" }, { 0x0019, 0x005d, "US", "?" }, { 0x0019, 0x005e, "xs", "?" }, { 0x0019, 0x005f, "SL", "Increment Between Channels" }, { 0x0019, 0x0060, "xs", "?" }, { 0x0019, 0x0061, "xs", "?" }, { 0x0019, 0x0062, "xs", "?" }, { 0x0019, 0x0063, "xs", "?" }, { 0x0019, 0x0064, "xs", "?" }, { 0x0019, 0x0065, "xs", "?" }, { 0x0019, 0x0066, "xs", "?" }, { 0x0019, 0x0067, "xs", "?" }, { 0x0019, 0x0068, "xs", "?" }, { 0x0019, 0x0069, "UL", "Convolution Mode" }, { 0x0019, 0x006a, "xs", "?" 
}, { 0x0019, 0x006b, "SS", "Field Of View In Detector Cells" }, { 0x0019, 0x006c, "US", "?" }, { 0x0019, 0x006e, "US", "?" }, { 0x0019, 0x0070, "xs", "?" }, { 0x0019, 0x0071, "xs", "?" }, { 0x0019, 0x0072, "xs", "?" }, { 0x0019, 0x0073, "xs", "?" }, { 0x0019, 0x0074, "xs", "?" }, { 0x0019, 0x0075, "xs", "?" }, { 0x0019, 0x0076, "xs", "?" }, { 0x0019, 0x0077, "US", "?" }, { 0x0019, 0x0078, "US", "?" }, { 0x0019, 0x007a, "US", "?" }, { 0x0019, 0x007c, "US", "?" }, { 0x0019, 0x007d, "DS", "Second Echo" }, { 0x0019, 0x007e, "xs", "?" }, { 0x0019, 0x007f, "DS", "Table Delta" }, { 0x0019, 0x0080, "xs", "?" }, { 0x0019, 0x0081, "xs", "?" }, { 0x0019, 0x0082, "xs", "?" }, { 0x0019, 0x0083, "xs", "?" }, { 0x0019, 0x0084, "xs", "?" }, { 0x0019, 0x0085, "xs", "?" }, { 0x0019, 0x0086, "xs", "?" }, { 0x0019, 0x0087, "xs", "?" }, { 0x0019, 0x0088, "xs", "?" }, { 0x0019, 0x008a, "xs", "?" }, { 0x0019, 0x008b, "SS", "Actual Receive Gain Digital" }, { 0x0019, 0x008c, "US", "?" }, { 0x0019, 0x008d, "DS", "Delay After Trigger" }, { 0x0019, 0x008e, "US", "?" }, { 0x0019, 0x008f, "SS", "Swap Phase Frequency" }, { 0x0019, 0x0090, "xs", "?" }, { 0x0019, 0x0091, "xs", "?" }, { 0x0019, 0x0092, "xs", "?" }, { 0x0019, 0x0093, "xs", "?" }, { 0x0019, 0x0094, "xs", "?" }, { 0x0019, 0x0095, "SS", "Analog Receiver Gain" }, { 0x0019, 0x0096, "xs", "?" }, { 0x0019, 0x0097, "xs", "?" }, { 0x0019, 0x0098, "xs", "?" }, { 0x0019, 0x0099, "US", "?" }, { 0x0019, 0x009a, "US", "?" }, { 0x0019, 0x009b, "SS", "Pulse Sequence Mode" }, { 0x0019, 0x009c, "xs", "?" }, { 0x0019, 0x009d, "DT", "Pulse Sequence Date" }, { 0x0019, 0x009e, "xs", "?" }, { 0x0019, 0x009f, "xs", "?" }, { 0x0019, 0x00a0, "xs", "?" }, { 0x0019, 0x00a1, "xs", "?" }, { 0x0019, 0x00a2, "xs", "?" }, { 0x0019, 0x00a3, "xs", "?" }, { 0x0019, 0x00a4, "xs", "?" }, { 0x0019, 0x00a5, "xs", "?" }, { 0x0019, 0x00a6, "xs", "?" }, { 0x0019, 0x00a7, "xs", "?" }, { 0x0019, 0x00a8, "xs", "?" }, { 0x0019, 0x00a9, "xs", "?" }, { 0x0019, 0x00aa, "xs", "?" 
}, { 0x0019, 0x00ab, "xs", "?" }, { 0x0019, 0x00ac, "xs", "?" }, { 0x0019, 0x00ad, "xs", "?" }, { 0x0019, 0x00ae, "xs", "?" }, { 0x0019, 0x00af, "xs", "?" }, { 0x0019, 0x00b0, "xs", "?" }, { 0x0019, 0x00b1, "xs", "?" }, { 0x0019, 0x00b2, "xs", "?" }, { 0x0019, 0x00b3, "xs", "?" }, { 0x0019, 0x00b4, "xs", "?" }, { 0x0019, 0x00b5, "xs", "?" }, { 0x0019, 0x00b6, "DS", "User Data" }, { 0x0019, 0x00b7, "DS", "User Data" }, { 0x0019, 0x00b8, "DS", "User Data" }, { 0x0019, 0x00b9, "DS", "User Data" }, { 0x0019, 0x00ba, "DS", "User Data" }, { 0x0019, 0x00bb, "DS", "User Data" }, { 0x0019, 0x00bc, "DS", "User Data" }, { 0x0019, 0x00bd, "DS", "User Data" }, { 0x0019, 0x00be, "DS", "Projection Angle" }, { 0x0019, 0x00c0, "xs", "?" }, { 0x0019, 0x00c1, "xs", "?" }, { 0x0019, 0x00c2, "xs", "?" }, { 0x0019, 0x00c3, "xs", "?" }, { 0x0019, 0x00c4, "xs", "?" }, { 0x0019, 0x00c5, "xs", "?" }, { 0x0019, 0x00c6, "SS", "SAT Location H" }, { 0x0019, 0x00c7, "SS", "SAT Location F" }, { 0x0019, 0x00c8, "SS", "SAT Thickness R L" }, { 0x0019, 0x00c9, "SS", "SAT Thickness A P" }, { 0x0019, 0x00ca, "SS", "SAT Thickness H F" }, { 0x0019, 0x00cb, "xs", "?" }, { 0x0019, 0x00cc, "xs", "?" }, { 0x0019, 0x00cd, "SS", "Thickness Disclaimer" }, { 0x0019, 0x00ce, "SS", "Prescan Type" }, { 0x0019, 0x00cf, "SS", "Prescan Status" }, { 0x0019, 0x00d0, "SH", "Raw Data Type" }, { 0x0019, 0x00d1, "DS", "Flow Sensitivity" }, { 0x0019, 0x00d2, "xs", "?" }, { 0x0019, 0x00d3, "xs", "?" }, { 0x0019, 0x00d4, "xs", "?" }, { 0x0019, 0x00d5, "xs", "?" }, { 0x0019, 0x00d6, "xs", "?" }, { 0x0019, 0x00d7, "xs", "?" }, { 0x0019, 0x00d8, "xs", "?" }, { 0x0019, 0x00d9, "xs", "?" }, { 0x0019, 0x00da, "xs", "?" 
}, { 0x0019, 0x00db, "DS", "Back Projector Coefficient" }, { 0x0019, 0x00dc, "SS", "Primary Speed Correction Used" }, { 0x0019, 0x00dd, "SS", "Overrange Correction Used" }, { 0x0019, 0x00de, "DS", "Dynamic Z Alpha Value" }, { 0x0019, 0x00df, "DS", "User Data" }, { 0x0019, 0x00e0, "DS", "User Data" }, { 0x0019, 0x00e1, "xs", "?" }, { 0x0019, 0x00e2, "xs", "?" }, { 0x0019, 0x00e3, "xs", "?" }, { 0x0019, 0x00e4, "LT", "?" }, { 0x0019, 0x00e5, "IS", "?" }, { 0x0019, 0x00e6, "US", "?" }, { 0x0019, 0x00e8, "DS", "?" }, { 0x0019, 0x00e9, "DS", "?" }, { 0x0019, 0x00eb, "DS", "?" }, { 0x0019, 0x00ec, "US", "?" }, { 0x0019, 0x00f0, "xs", "?" }, { 0x0019, 0x00f1, "xs", "?" }, { 0x0019, 0x00f2, "xs", "?" }, { 0x0019, 0x00f3, "xs", "?" }, { 0x0019, 0x00f4, "LT", "?" }, { 0x0019, 0x00f9, "DS", "Transmission Gain" }, { 0x0019, 0x1015, "UN", "?" }, { 0x0020, 0x0000, "UL", "Relationship Group Length" }, { 0x0020, 0x000d, "UI", "Study Instance UID" }, { 0x0020, 0x000e, "UI", "Series Instance UID" }, { 0x0020, 0x0010, "SH", "Study ID" }, { 0x0020, 0x0011, "IS", "Series Number" }, { 0x0020, 0x0012, "IS", "Acquisition Number" }, { 0x0020, 0x0013, "IS", "Instance (formerly Image) Number" }, { 0x0020, 0x0014, "IS", "Isotope Number" }, { 0x0020, 0x0015, "IS", "Phase Number" }, { 0x0020, 0x0016, "IS", "Interval Number" }, { 0x0020, 0x0017, "IS", "Time Slot Number" }, { 0x0020, 0x0018, "IS", "Angle Number" }, { 0x0020, 0x0020, "CS", "Patient Orientation" }, { 0x0020, 0x0022, "IS", "Overlay Number" }, { 0x0020, 0x0024, "IS", "Curve Number" }, { 0x0020, 0x0026, "IS", "LUT Number" }, { 0x0020, 0x0030, "DS", "Image Position" }, { 0x0020, 0x0032, "DS", "Image Position (Patient)" }, { 0x0020, 0x0035, "DS", "Image Orientation" }, { 0x0020, 0x0037, "DS", "Image Orientation (Patient)" }, { 0x0020, 0x0050, "DS", "Location" }, { 0x0020, 0x0052, "UI", "Frame of Reference UID" }, { 0x0020, 0x0060, "CS", "Laterality" }, { 0x0020, 0x0062, "CS", "Image Laterality" }, { 0x0020, 0x0070, "LT", "Image Geometry 
Type" }, { 0x0020, 0x0080, "LO", "Masking Image" }, { 0x0020, 0x0100, "IS", "Temporal Position Identifier" }, { 0x0020, 0x0105, "IS", "Number of Temporal Positions" }, { 0x0020, 0x0110, "DS", "Temporal Resolution" }, { 0x0020, 0x1000, "IS", "Series in Study" }, { 0x0020, 0x1001, "DS", "Acquisitions in Series" }, { 0x0020, 0x1002, "IS", "Images in Acquisition" }, { 0x0020, 0x1003, "IS", "Images in Series" }, { 0x0020, 0x1004, "IS", "Acquisitions in Study" }, { 0x0020, 0x1005, "IS", "Images in Study" }, { 0x0020, 0x1020, "LO", "Reference" }, { 0x0020, 0x1040, "LO", "Position Reference Indicator" }, { 0x0020, 0x1041, "DS", "Slice Location" }, { 0x0020, 0x1070, "IS", "Other Study Numbers" }, { 0x0020, 0x1200, "IS", "Number of Patient Related Studies" }, { 0x0020, 0x1202, "IS", "Number of Patient Related Series" }, { 0x0020, 0x1204, "IS", "Number of Patient Related Images" }, { 0x0020, 0x1206, "IS", "Number of Study Related Series" }, { 0x0020, 0x1208, "IS", "Number of Study Related Series" }, { 0x0020, 0x3100, "LO", "Source Image IDs" }, { 0x0020, 0x3401, "LO", "Modifying Device ID" }, { 0x0020, 0x3402, "LO", "Modified Image ID" }, { 0x0020, 0x3403, "xs", "Modified Image Date" }, { 0x0020, 0x3404, "LO", "Modifying Device Manufacturer" }, { 0x0020, 0x3405, "xs", "Modified Image Time" }, { 0x0020, 0x3406, "xs", "Modified Image Description" }, { 0x0020, 0x4000, "LT", "Image Comments" }, { 0x0020, 0x5000, "AT", "Original Image Identification" }, { 0x0020, 0x5002, "LO", "Original Image Identification Nomenclature" }, { 0x0021, 0x0000, "xs", "?" }, { 0x0021, 0x0001, "xs", "?" }, { 0x0021, 0x0002, "xs", "?" }, { 0x0021, 0x0003, "xs", "?" }, { 0x0021, 0x0004, "DS", "VOI Position" }, { 0x0021, 0x0005, "xs", "?" }, { 0x0021, 0x0006, "IS", "CSI Matrix Size Original" }, { 0x0021, 0x0007, "xs", "?" }, { 0x0021, 0x0008, "DS", "Spatial Grid Shift" }, { 0x0021, 0x0009, "DS", "Signal Limits Minimum" }, { 0x0021, 0x0010, "xs", "?" }, { 0x0021, 0x0011, "xs", "?" 
}, { 0x0021, 0x0012, "xs", "?" }, { 0x0021, 0x0013, "xs", "?" }, { 0x0021, 0x0014, "xs", "?" }, { 0x0021, 0x0015, "xs", "?" }, { 0x0021, 0x0016, "xs", "?" }, { 0x0021, 0x0017, "DS", "EPI Operation Mode Flag" }, { 0x0021, 0x0018, "xs", "?" }, { 0x0021, 0x0019, "xs", "?" }, { 0x0021, 0x0020, "xs", "?" }, { 0x0021, 0x0021, "xs", "?" }, { 0x0021, 0x0022, "xs", "?" }, { 0x0021, 0x0024, "xs", "?" }, { 0x0021, 0x0025, "US", "?" }, { 0x0021, 0x0026, "IS", "Image Pixel Offset" }, { 0x0021, 0x0030, "xs", "?" }, { 0x0021, 0x0031, "xs", "?" }, { 0x0021, 0x0032, "xs", "?" }, { 0x0021, 0x0034, "xs", "?" }, { 0x0021, 0x0035, "SS", "Series From Which Prescribed" }, { 0x0021, 0x0036, "xs", "?" }, { 0x0021, 0x0037, "SS", "Screen Format" }, { 0x0021, 0x0039, "DS", "Slab Thickness" }, { 0x0021, 0x0040, "xs", "?" }, { 0x0021, 0x0041, "xs", "?" }, { 0x0021, 0x0042, "xs", "?" }, { 0x0021, 0x0043, "xs", "?" }, { 0x0021, 0x0044, "xs", "?" }, { 0x0021, 0x0045, "xs", "?" }, { 0x0021, 0x0046, "xs", "?" }, { 0x0021, 0x0047, "xs", "?" }, { 0x0021, 0x0048, "xs", "?" }, { 0x0021, 0x0049, "xs", "?" }, { 0x0021, 0x004a, "xs", "?" }, { 0x0021, 0x004e, "US", "?" }, { 0x0021, 0x004f, "xs", "?" }, { 0x0021, 0x0050, "xs", "?" }, { 0x0021, 0x0051, "xs", "?" }, { 0x0021, 0x0052, "xs", "?" }, { 0x0021, 0x0053, "xs", "?" }, { 0x0021, 0x0054, "xs", "?" }, { 0x0021, 0x0055, "xs", "?" }, { 0x0021, 0x0056, "xs", "?" }, { 0x0021, 0x0057, "xs", "?" }, { 0x0021, 0x0058, "xs", "?" }, { 0x0021, 0x0059, "xs", "?" }, { 0x0021, 0x005a, "SL", "Integer Slop" }, { 0x0021, 0x005b, "DS", "Float Slop" }, { 0x0021, 0x005c, "DS", "Float Slop" }, { 0x0021, 0x005d, "DS", "Float Slop" }, { 0x0021, 0x005e, "DS", "Float Slop" }, { 0x0021, 0x005f, "DS", "Float Slop" }, { 0x0021, 0x0060, "xs", "?" 
}, { 0x0021, 0x0061, "DS", "Image Normal" }, { 0x0021, 0x0062, "IS", "Reference Type Code" }, { 0x0021, 0x0063, "DS", "Image Distance" }, { 0x0021, 0x0065, "US", "Image Positioning History Mask" }, { 0x0021, 0x006a, "DS", "Image Row" }, { 0x0021, 0x006b, "DS", "Image Column" }, { 0x0021, 0x0070, "xs", "?" }, { 0x0021, 0x0071, "xs", "?" }, { 0x0021, 0x0072, "xs", "?" }, { 0x0021, 0x0073, "DS", "Second Repetition Time" }, { 0x0021, 0x0075, "DS", "Light Brightness" }, { 0x0021, 0x0076, "DS", "Light Contrast" }, { 0x0021, 0x007a, "IS", "Overlay Threshold" }, { 0x0021, 0x007b, "IS", "Surface Threshold" }, { 0x0021, 0x007c, "IS", "Grey Scale Threshold" }, { 0x0021, 0x0080, "xs", "?" }, { 0x0021, 0x0081, "DS", "Auto Window Level Alpha" }, { 0x0021, 0x0082, "xs", "?" }, { 0x0021, 0x0083, "DS", "Auto Window Level Window" }, { 0x0021, 0x0084, "DS", "Auto Window Level Level" }, { 0x0021, 0x0090, "xs", "?" }, { 0x0021, 0x0091, "xs", "?" }, { 0x0021, 0x0092, "xs", "?" }, { 0x0021, 0x0093, "xs", "?" }, { 0x0021, 0x0094, "DS", "EPI Change Value of X Component" }, { 0x0021, 0x0095, "DS", "EPI Change Value of Y Component" }, { 0x0021, 0x0096, "DS", "EPI Change Value of Z Component" }, { 0x0021, 0x00a0, "xs", "?" }, { 0x0021, 0x00a1, "DS", "?" }, { 0x0021, 0x00a2, "xs", "?" }, { 0x0021, 0x00a3, "LT", "?" }, { 0x0021, 0x00a4, "LT", "?" }, { 0x0021, 0x00a7, "LT", "?" }, { 0x0021, 0x00b0, "IS", "?" }, { 0x0021, 0x00c0, "IS", "?" }, { 0x0023, 0x0000, "xs", "?" }, { 0x0023, 0x0001, "SL", "Number Of Series In Study" }, { 0x0023, 0x0002, "SL", "Number Of Unarchived Series" }, { 0x0023, 0x0010, "xs", "?" }, { 0x0023, 0x0020, "xs", "?" }, { 0x0023, 0x0030, "xs", "?" }, { 0x0023, 0x0040, "xs", "?" }, { 0x0023, 0x0050, "xs", "?" }, { 0x0023, 0x0060, "xs", "?" }, { 0x0023, 0x0070, "xs", "?" }, { 0x0023, 0x0074, "SL", "Number Of Updates To Info" }, { 0x0023, 0x007d, "SS", "Indicates If Study Has Complete Info" }, { 0x0023, 0x0080, "xs", "?" }, { 0x0023, 0x0090, "xs", "?" 
}, { 0x0023, 0x00ff, "US", "?" }, { 0x0025, 0x0000, "UL", "Group Length" }, { 0x0025, 0x0006, "SS", "Last Pulse Sequence Used" }, { 0x0025, 0x0007, "SL", "Images In Series" }, { 0x0025, 0x0010, "SS", "Landmark Counter" }, { 0x0025, 0x0011, "SS", "Number Of Acquisitions" }, { 0x0025, 0x0014, "SL", "Indicates Number Of Updates To Info" }, { 0x0025, 0x0017, "SL", "Series Complete Flag" }, { 0x0025, 0x0018, "SL", "Number Of Images Archived" }, { 0x0025, 0x0019, "SL", "Last Image Number Used" }, { 0x0025, 0x001a, "SH", "Primary Receiver Suite And Host" }, { 0x0027, 0x0000, "US", "?" }, { 0x0027, 0x0006, "SL", "Image Archive Flag" }, { 0x0027, 0x0010, "SS", "Scout Type" }, { 0x0027, 0x0011, "UN", "?" }, { 0x0027, 0x0012, "IS", "?" }, { 0x0027, 0x0013, "IS", "?" }, { 0x0027, 0x0014, "IS", "?" }, { 0x0027, 0x0015, "IS", "?" }, { 0x0027, 0x0016, "LT", "?" }, { 0x0027, 0x001c, "SL", "Vma Mamp" }, { 0x0027, 0x001d, "SS", "Vma Phase" }, { 0x0027, 0x001e, "SL", "Vma Mod" }, { 0x0027, 0x001f, "SL", "Vma Clip" }, { 0x0027, 0x0020, "SS", "Smart Scan On Off Flag" }, { 0x0027, 0x0030, "SH", "Foreign Image Revision" }, { 0x0027, 0x0031, "SS", "Imaging Mode" }, { 0x0027, 0x0032, "SS", "Pulse Sequence" }, { 0x0027, 0x0033, "SL", "Imaging Options" }, { 0x0027, 0x0035, "SS", "Plane Type" }, { 0x0027, 0x0036, "SL", "Oblique Plane" }, { 0x0027, 0x0040, "SH", "RAS Letter Of Image Location" }, { 0x0027, 0x0041, "FL", "Image Location" }, { 0x0027, 0x0042, "FL", "Center R Coord Of Plane Image" }, { 0x0027, 0x0043, "FL", "Center A Coord Of Plane Image" }, { 0x0027, 0x0044, "FL", "Center S Coord Of Plane Image" }, { 0x0027, 0x0045, "FL", "Normal R Coord" }, { 0x0027, 0x0046, "FL", "Normal A Coord" }, { 0x0027, 0x0047, "FL", "Normal S Coord" }, { 0x0027, 0x0048, "FL", "R Coord Of Top Right Corner" }, { 0x0027, 0x0049, "FL", "A Coord Of Top Right Corner" }, { 0x0027, 0x004a, "FL", "S Coord Of Top Right Corner" }, { 0x0027, 0x004b, "FL", "R Coord Of Bottom Right Corner" }, { 0x0027, 0x004c, "FL", 
"A Coord Of Bottom Right Corner" }, { 0x0027, 0x004d, "FL", "S Coord Of Bottom Right Corner" }, { 0x0027, 0x0050, "FL", "Table Start Location" }, { 0x0027, 0x0051, "FL", "Table End Location" }, { 0x0027, 0x0052, "SH", "RAS Letter For Side Of Image" }, { 0x0027, 0x0053, "SH", "RAS Letter For Anterior Posterior" }, { 0x0027, 0x0054, "SH", "RAS Letter For Scout Start Loc" }, { 0x0027, 0x0055, "SH", "RAS Letter For Scout End Loc" }, { 0x0027, 0x0060, "FL", "Image Dimension X" }, { 0x0027, 0x0061, "FL", "Image Dimension Y" }, { 0x0027, 0x0062, "FL", "Number Of Excitations" }, { 0x0028, 0x0000, "UL", "Image Presentation Group Length" }, { 0x0028, 0x0002, "US", "Samples per Pixel" }, { 0x0028, 0x0004, "CS", "Photometric Interpretation" }, { 0x0028, 0x0005, "US", "Image Dimensions" }, { 0x0028, 0x0006, "US", "Planar Configuration" }, { 0x0028, 0x0008, "IS", "Number of Frames" }, { 0x0028, 0x0009, "AT", "Frame Increment Pointer" }, { 0x0028, 0x0010, "US", "Rows" }, { 0x0028, 0x0011, "US", "Columns" }, { 0x0028, 0x0012, "US", "Planes" }, { 0x0028, 0x0014, "US", "Ultrasound Color Data Present" }, { 0x0028, 0x0030, "DS", "Pixel Spacing" }, { 0x0028, 0x0031, "DS", "Zoom Factor" }, { 0x0028, 0x0032, "DS", "Zoom Center" }, { 0x0028, 0x0034, "IS", "Pixel Aspect Ratio" }, { 0x0028, 0x0040, "LO", "Image Format" }, { 0x0028, 0x0050, "LT", "Manipulated Image" }, { 0x0028, 0x0051, "CS", "Corrected Image" }, { 0x0028, 0x005f, "LO", "Compression Recognition Code" }, { 0x0028, 0x0060, "LO", "Compression Code" }, { 0x0028, 0x0061, "SH", "Compression Originator" }, { 0x0028, 0x0062, "SH", "Compression Label" }, { 0x0028, 0x0063, "SH", "Compression Description" }, { 0x0028, 0x0065, "LO", "Compression Sequence" }, { 0x0028, 0x0066, "AT", "Compression Step Pointers" }, { 0x0028, 0x0068, "US", "Repeat Interval" }, { 0x0028, 0x0069, "US", "Bits Grouped" }, { 0x0028, 0x0070, "US", "Perimeter Table" }, { 0x0028, 0x0071, "xs", "Perimeter Value" }, { 0x0028, 0x0080, "US", "Predictor Rows" }, { 
0x0028, 0x0081, "US", "Predictor Columns" }, { 0x0028, 0x0082, "US", "Predictor Constants" }, { 0x0028, 0x0090, "LO", "Blocked Pixels" }, { 0x0028, 0x0091, "US", "Block Rows" }, { 0x0028, 0x0092, "US", "Block Columns" }, { 0x0028, 0x0093, "US", "Row Overlap" }, { 0x0028, 0x0094, "US", "Column Overlap" }, { 0x0028, 0x0100, "US", "Bits Allocated" }, { 0x0028, 0x0101, "US", "Bits Stored" }, { 0x0028, 0x0102, "US", "High Bit" }, { 0x0028, 0x0103, "US", "Pixel Representation" }, { 0x0028, 0x0104, "xs", "Smallest Valid Pixel Value" }, { 0x0028, 0x0105, "xs", "Largest Valid Pixel Value" }, { 0x0028, 0x0106, "xs", "Smallest Image Pixel Value" }, { 0x0028, 0x0107, "xs", "Largest Image Pixel Value" }, { 0x0028, 0x0108, "xs", "Smallest Pixel Value in Series" }, { 0x0028, 0x0109, "xs", "Largest Pixel Value in Series" }, { 0x0028, 0x0110, "xs", "Smallest Pixel Value in Plane" }, { 0x0028, 0x0111, "xs", "Largest Pixel Value in Plane" }, { 0x0028, 0x0120, "xs", "Pixel Padding Value" }, { 0x0028, 0x0200, "xs", "Image Location" }, { 0x0028, 0x0300, "CS", "Quality Control Image" }, { 0x0028, 0x0301, "CS", "Burned In Annotation" }, { 0x0028, 0x0400, "xs", "?" }, { 0x0028, 0x0401, "xs", "?" }, { 0x0028, 0x0402, "xs", "?" }, { 0x0028, 0x0403, "xs", "?" 
}, { 0x0028, 0x0404, "AT", "Details of Coefficients" }, { 0x0028, 0x0700, "LO", "DCT Label" }, { 0x0028, 0x0701, "LO", "Data Block Description" }, { 0x0028, 0x0702, "AT", "Data Block" }, { 0x0028, 0x0710, "US", "Normalization Factor Format" }, { 0x0028, 0x0720, "US", "Zonal Map Number Format" }, { 0x0028, 0x0721, "AT", "Zonal Map Location" }, { 0x0028, 0x0722, "US", "Zonal Map Format" }, { 0x0028, 0x0730, "US", "Adaptive Map Format" }, { 0x0028, 0x0740, "US", "Code Number Format" }, { 0x0028, 0x0800, "LO", "Code Label" }, { 0x0028, 0x0802, "US", "Number of Tables" }, { 0x0028, 0x0803, "AT", "Code Table Location" }, { 0x0028, 0x0804, "US", "Bits For Code Word" }, { 0x0028, 0x0808, "AT", "Image Data Location" }, { 0x0028, 0x1040, "CS", "Pixel Intensity Relationship" }, { 0x0028, 0x1041, "SS", "Pixel Intensity Relationship Sign" }, { 0x0028, 0x1050, "DS", "Window Center" }, { 0x0028, 0x1051, "DS", "Window Width" }, { 0x0028, 0x1052, "DS", "Rescale Intercept" }, { 0x0028, 0x1053, "DS", "Rescale Slope" }, { 0x0028, 0x1054, "LO", "Rescale Type" }, { 0x0028, 0x1055, "LO", "Window Center & Width Explanation" }, { 0x0028, 0x1080, "LO", "Gray Scale" }, { 0x0028, 0x1090, "CS", "Recommended Viewing Mode" }, { 0x0028, 0x1100, "xs", "Gray Lookup Table Descriptor" }, { 0x0028, 0x1101, "xs", "Red Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1102, "xs", "Green Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1103, "xs", "Blue Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1111, "OW", "Large Red Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1112, "OW", "Large Green Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1113, "OW", "Large Blue Palette Color Lookup Table Descriptor" }, { 0x0028, 0x1199, "UI", "Palette Color Lookup Table UID" }, { 0x0028, 0x1200, "xs", "Gray Lookup Table Data" }, { 0x0028, 0x1201, "OW", "Red Palette Color Lookup Table Data" }, { 0x0028, 0x1202, "OW", "Green Palette Color Lookup Table Data" }, { 0x0028, 0x1203, "OW", "Blue 
Palette Color Lookup Table Data" }, { 0x0028, 0x1211, "OW", "Large Red Palette Color Lookup Table Data" }, { 0x0028, 0x1212, "OW", "Large Green Palette Color Lookup Table Data" }, { 0x0028, 0x1213, "OW", "Large Blue Palette Color Lookup Table Data" }, { 0x0028, 0x1214, "UI", "Large Palette Color Lookup Table UID" }, { 0x0028, 0x1221, "OW", "Segmented Red Palette Color Lookup Table Data" }, { 0x0028, 0x1222, "OW", "Segmented Green Palette Color Lookup Table Data" }, { 0x0028, 0x1223, "OW", "Segmented Blue Palette Color Lookup Table Data" }, { 0x0028, 0x1300, "CS", "Implant Present" }, { 0x0028, 0x2110, "CS", "Lossy Image Compression" }, { 0x0028, 0x2112, "DS", "Lossy Image Compression Ratio" }, { 0x0028, 0x3000, "SQ", "Modality LUT Sequence" }, { 0x0028, 0x3002, "US", "LUT Descriptor" }, { 0x0028, 0x3003, "LO", "LUT Explanation" }, { 0x0028, 0x3004, "LO", "Modality LUT Type" }, { 0x0028, 0x3006, "US", "LUT Data" }, { 0x0028, 0x3010, "xs", "VOI LUT Sequence" }, { 0x0028, 0x4000, "LT", "Image Presentation Comments" }, { 0x0028, 0x5000, "SQ", "Biplane Acquisition Sequence" }, { 0x0028, 0x6010, "US", "Representative Frame Number" }, { 0x0028, 0x6020, "US", "Frame Numbers of Interest" }, { 0x0028, 0x6022, "LO", "Frame of Interest Description" }, { 0x0028, 0x6030, "US", "Mask Pointer" }, { 0x0028, 0x6040, "US", "R Wave Pointer" }, { 0x0028, 0x6100, "SQ", "Mask Subtraction Sequence" }, { 0x0028, 0x6101, "CS", "Mask Operation" }, { 0x0028, 0x6102, "US", "Applicable Frame Range" }, { 0x0028, 0x6110, "US", "Mask Frame Numbers" }, { 0x0028, 0x6112, "US", "Contrast Frame Averaging" }, { 0x0028, 0x6114, "FL", "Mask Sub-Pixel Shift" }, { 0x0028, 0x6120, "SS", "TID Offset" }, { 0x0028, 0x6190, "ST", "Mask Operation Explanation" }, { 0x0029, 0x0000, "xs", "?" }, { 0x0029, 0x0001, "xs", "?" }, { 0x0029, 0x0002, "xs", "?" }, { 0x0029, 0x0003, "xs", "?" }, { 0x0029, 0x0004, "xs", "?" }, { 0x0029, 0x0005, "xs", "?" }, { 0x0029, 0x0006, "xs", "?" 
}, { 0x0029, 0x0007, "SL", "Lower Range Of Pixels" }, { 0x0029, 0x0008, "SH", "Lower Range Of Pixels" }, { 0x0029, 0x0009, "SH", "Lower Range Of Pixels" }, { 0x0029, 0x000a, "SS", "Lower Range Of Pixels" }, { 0x0029, 0x000c, "xs", "?" }, { 0x0029, 0x000e, "CS", "Zoom Enable Status" }, { 0x0029, 0x000f, "CS", "Zoom Select Status" }, { 0x0029, 0x0010, "xs", "?" }, { 0x0029, 0x0011, "xs", "?" }, { 0x0029, 0x0013, "LT", "?" }, { 0x0029, 0x0015, "xs", "?" }, { 0x0029, 0x0016, "SL", "Lower Range Of Pixels" }, { 0x0029, 0x0017, "SL", "Lower Range Of Pixels" }, { 0x0029, 0x0018, "SL", "Upper Range Of Pixels" }, { 0x0029, 0x001a, "SL", "Length Of Total Info In Bytes" }, { 0x0029, 0x001e, "xs", "?" }, { 0x0029, 0x001f, "xs", "?" }, { 0x0029, 0x0020, "xs", "?" }, { 0x0029, 0x0022, "IS", "Pixel Quality Value" }, { 0x0029, 0x0025, "LT", "Processed Pixel Data Quality" }, { 0x0029, 0x0026, "SS", "Version Of Info Structure" }, { 0x0029, 0x0030, "xs", "?" }, { 0x0029, 0x0031, "xs", "?" }, { 0x0029, 0x0032, "xs", "?" }, { 0x0029, 0x0033, "xs", "?" }, { 0x0029, 0x0034, "xs", "?" }, { 0x0029, 0x0035, "SL", "Advantage Comp Underflow" }, { 0x0029, 0x0038, "US", "?" }, { 0x0029, 0x0040, "xs", "?" }, { 0x0029, 0x0041, "DS", "Magnifying Glass Rectangle" }, { 0x0029, 0x0043, "DS", "Magnifying Glass Factor" }, { 0x0029, 0x0044, "US", "Magnifying Glass Function" }, { 0x0029, 0x004e, "CS", "Magnifying Glass Enable Status" }, { 0x0029, 0x004f, "CS", "Magnifying Glass Select Status" }, { 0x0029, 0x0050, "xs", "?" }, { 0x0029, 0x0051, "LT", "Exposure Code" }, { 0x0029, 0x0052, "LT", "Sort Code" }, { 0x0029, 0x0053, "LT", "?" }, { 0x0029, 0x0060, "xs", "?" }, { 0x0029, 0x0061, "xs", "?" }, { 0x0029, 0x0067, "LT", "?" }, { 0x0029, 0x0070, "xs", "?" }, { 0x0029, 0x0071, "xs", "?" }, { 0x0029, 0x0072, "xs", "?" 
}, { 0x0029, 0x0077, "CS", "Window Select Status" }, { 0x0029, 0x0078, "LT", "ECG Display Printing ID" }, { 0x0029, 0x0079, "CS", "ECG Display Printing" }, { 0x0029, 0x007e, "CS", "ECG Display Printing Enable Status" }, { 0x0029, 0x007f, "CS", "ECG Display Printing Select Status" }, { 0x0029, 0x0080, "xs", "?" }, { 0x0029, 0x0081, "xs", "?" }, { 0x0029, 0x0082, "IS", "View Zoom" }, { 0x0029, 0x0083, "IS", "View Transform" }, { 0x0029, 0x008e, "CS", "Physiological Display Enable Status" }, { 0x0029, 0x008f, "CS", "Physiological Display Select Status" }, { 0x0029, 0x0090, "IS", "?" }, { 0x0029, 0x0099, "LT", "Shutter Type" }, { 0x0029, 0x00a0, "US", "Rows of Rectangular Shutter" }, { 0x0029, 0x00a1, "US", "Columns of Rectangular Shutter" }, { 0x0029, 0x00a2, "US", "Origin of Rectangular Shutter" }, { 0x0029, 0x00b0, "US", "Radius of Circular Shutter" }, { 0x0029, 0x00b2, "US", "Origin of Circular Shutter" }, { 0x0029, 0x00c0, "LT", "Functional Shutter ID" }, { 0x0029, 0x00c1, "xs", "?" }, { 0x0029, 0x00c3, "IS", "Scan Resolution" }, { 0x0029, 0x00c4, "IS", "Field of View" }, { 0x0029, 0x00c5, "LT", "Field Of Shutter Rectangle" }, { 0x0029, 0x00ce, "CS", "Shutter Enable Status" }, { 0x0029, 0x00cf, "CS", "Shutter Select Status" }, { 0x0029, 0x00d0, "IS", "?" }, { 0x0029, 0x00d1, "IS", "?" 
}, { 0x0029, 0x00d5, "LT", "Slice Thickness" }, { 0x0031, 0x0010, "LT", "Request UID" }, { 0x0031, 0x0012, "LT", "Examination Reason" }, { 0x0031, 0x0030, "DA", "Requested Date" }, { 0x0031, 0x0032, "TM", "Worklist Request Start Time" }, { 0x0031, 0x0033, "TM", "Worklist Request End Time" }, { 0x0031, 0x0045, "LT", "Requesting Physician" }, { 0x0031, 0x004a, "TM", "Requested Time" }, { 0x0031, 0x0050, "LT", "Requested Physician" }, { 0x0031, 0x0080, "LT", "Requested Location" }, { 0x0032, 0x0000, "UL", "Study Group Length" }, { 0x0032, 0x000a, "CS", "Study Status ID" }, { 0x0032, 0x000c, "CS", "Study Priority ID" }, { 0x0032, 0x0012, "LO", "Study ID Issuer" }, { 0x0032, 0x0032, "DA", "Study Verified Date" }, { 0x0032, 0x0033, "TM", "Study Verified Time" }, { 0x0032, 0x0034, "DA", "Study Read Date" }, { 0x0032, 0x0035, "TM", "Study Read Time" }, { 0x0032, 0x1000, "DA", "Scheduled Study Start Date" }, { 0x0032, 0x1001, "TM", "Scheduled Study Start Time" }, { 0x0032, 0x1010, "DA", "Scheduled Study Stop Date" }, { 0x0032, 0x1011, "TM", "Scheduled Study Stop Time" }, { 0x0032, 0x1020, "LO", "Scheduled Study Location" }, { 0x0032, 0x1021, "AE", "Scheduled Study Location AE Title(s)" }, { 0x0032, 0x1030, "LO", "Reason for Study" }, { 0x0032, 0x1032, "PN", "Requesting Physician" }, { 0x0032, 0x1033, "LO", "Requesting Service" }, { 0x0032, 0x1040, "DA", "Study Arrival Date" }, { 0x0032, 0x1041, "TM", "Study Arrival Time" }, { 0x0032, 0x1050, "DA", "Study Completion Date" }, { 0x0032, 0x1051, "TM", "Study Completion Time" }, { 0x0032, 0x1055, "CS", "Study Component Status ID" }, { 0x0032, 0x1060, "LO", "Requested Procedure Description" }, { 0x0032, 0x1064, "SQ", "Requested Procedure Code Sequence" }, { 0x0032, 0x1070, "LO", "Requested Contrast Agent" }, { 0x0032, 0x4000, "LT", "Study Comments" }, { 0x0033, 0x0001, "UN", "?" }, { 0x0033, 0x0002, "UN", "?" }, { 0x0033, 0x0005, "UN", "?" }, { 0x0033, 0x0006, "UN", "?" 
}, { 0x0033, 0x0010, "LT", "Patient Study UID" }, { 0x0037, 0x0010, "LO", "ReferringDepartment" }, { 0x0037, 0x0020, "US", "ScreenNumber" }, { 0x0037, 0x0040, "SH", "LeftOrientation" }, { 0x0037, 0x0042, "SH", "RightOrientation" }, { 0x0037, 0x0050, "CS", "Inversion" }, { 0x0037, 0x0060, "US", "DSA" }, { 0x0038, 0x0000, "UL", "Visit Group Length" }, { 0x0038, 0x0004, "SQ", "Referenced Patient Alias Sequence" }, { 0x0038, 0x0008, "CS", "Visit Status ID" }, { 0x0038, 0x0010, "LO", "Admission ID" }, { 0x0038, 0x0011, "LO", "Issuer of Admission ID" }, { 0x0038, 0x0016, "LO", "Route of Admissions" }, { 0x0038, 0x001a, "DA", "Scheduled Admission Date" }, { 0x0038, 0x001b, "TM", "Scheduled Admission Time" }, { 0x0038, 0x001c, "DA", "Scheduled Discharge Date" }, { 0x0038, 0x001d, "TM", "Scheduled Discharge Time" }, { 0x0038, 0x001e, "LO", "Scheduled Patient Institution Residence" }, { 0x0038, 0x0020, "DA", "Admitting Date" }, { 0x0038, 0x0021, "TM", "Admitting Time" }, { 0x0038, 0x0030, "DA", "Discharge Date" }, { 0x0038, 0x0032, "TM", "Discharge Time" }, { 0x0038, 0x0040, "LO", "Discharge Diagnosis Description" }, { 0x0038, 0x0044, "SQ", "Discharge Diagnosis Code Sequence" }, { 0x0038, 0x0050, "LO", "Special Needs" }, { 0x0038, 0x0300, "LO", "Current Patient Location" }, { 0x0038, 0x0400, "LO", "Patient's Institution Residence" }, { 0x0038, 0x0500, "LO", "Patient State" }, { 0x0038, 0x4000, "LT", "Visit Comments" }, { 0x0039, 0x0080, "IS", "Private Entity Number" }, { 0x0039, 0x0085, "DA", "Private Entity Date" }, { 0x0039, 0x0090, "TM", "Private Entity Time" }, { 0x0039, 0x0095, "LO", "Private Entity Launch Command" }, { 0x0039, 0x00aa, "CS", "Private Entity Type" }, { 0x003a, 0x0002, "SQ", "Waveform Sequence" }, { 0x003a, 0x0005, "US", "Waveform Number of Channels" }, { 0x003a, 0x0010, "UL", "Waveform Number of Samples" }, { 0x003a, 0x001a, "DS", "Sampling Frequency" }, { 0x003a, 0x0020, "SH", "Group Label" }, { 0x003a, 0x0103, "CS", "Waveform Sample Value 
Representation" }, { 0x003a, 0x0122, "OB", "Waveform Padding Value" }, { 0x003a, 0x0200, "SQ", "Channel Definition" }, { 0x003a, 0x0202, "IS", "Waveform Channel Number" }, { 0x003a, 0x0203, "SH", "Channel Label" }, { 0x003a, 0x0205, "CS", "Channel Status" }, { 0x003a, 0x0208, "SQ", "Channel Source" }, { 0x003a, 0x0209, "SQ", "Channel Source Modifiers" }, { 0x003a, 0x020a, "SQ", "Differential Channel Source" }, { 0x003a, 0x020b, "SQ", "Differential Channel Source Modifiers" }, { 0x003a, 0x0210, "DS", "Channel Sensitivity" }, { 0x003a, 0x0211, "SQ", "Channel Sensitivity Units" }, { 0x003a, 0x0212, "DS", "Channel Sensitivity Correction Factor" }, { 0x003a, 0x0213, "DS", "Channel Baseline" }, { 0x003a, 0x0214, "DS", "Channel Time Skew" }, { 0x003a, 0x0215, "DS", "Channel Sample Skew" }, { 0x003a, 0x0216, "OB", "Channel Minimum Value" }, { 0x003a, 0x0217, "OB", "Channel Maximum Value" }, { 0x003a, 0x0218, "DS", "Channel Offset" }, { 0x003a, 0x021a, "US", "Bits Per Sample" }, { 0x003a, 0x0220, "DS", "Filter Low Frequency" }, { 0x003a, 0x0221, "DS", "Filter High Frequency" }, { 0x003a, 0x0222, "DS", "Notch Filter Frequency" }, { 0x003a, 0x0223, "DS", "Notch Filter Bandwidth" }, { 0x003a, 0x1000, "OB", "Waveform Data" }, { 0x0040, 0x0001, "AE", "Scheduled Station AE Title" }, { 0x0040, 0x0002, "DA", "Scheduled Procedure Step Start Date" }, { 0x0040, 0x0003, "TM", "Scheduled Procedure Step Start Time" }, { 0x0040, 0x0004, "DA", "Scheduled Procedure Step End Date" }, { 0x0040, 0x0005, "TM", "Scheduled Procedure Step End Time" }, { 0x0040, 0x0006, "PN", "Scheduled Performing Physician Name" }, { 0x0040, 0x0007, "LO", "Scheduled Procedure Step Description" }, { 0x0040, 0x0008, "SQ", "Scheduled Action Item Code Sequence" }, { 0x0040, 0x0009, "SH", "Scheduled Procedure Step ID" }, { 0x0040, 0x0010, "SH", "Scheduled Station Name" }, { 0x0040, 0x0011, "SH", "Scheduled Procedure Step Location" }, { 0x0040, 0x0012, "LO", "Pre-Medication" }, { 0x0040, 0x0020, "CS", "Scheduled 
Procedure Step Status" }, { 0x0040, 0x0100, "SQ", "Scheduled Procedure Step Sequence" }, { 0x0040, 0x0302, "US", "Entrance Dose" }, { 0x0040, 0x0303, "US", "Exposed Area" }, { 0x0040, 0x0306, "DS", "Distance Source to Entrance" }, { 0x0040, 0x0307, "DS", "Distance Source to Support" }, { 0x0040, 0x0310, "ST", "Comments On Radiation Dose" }, { 0x0040, 0x0312, "DS", "X-Ray Output" }, { 0x0040, 0x0314, "DS", "Half Value Layer" }, { 0x0040, 0x0316, "DS", "Organ Dose" }, { 0x0040, 0x0318, "CS", "Organ Exposed" }, { 0x0040, 0x0400, "LT", "Comments On Scheduled Procedure Step" }, { 0x0040, 0x050a, "LO", "Specimen Accession Number" }, { 0x0040, 0x0550, "SQ", "Specimen Sequence" }, { 0x0040, 0x0551, "LO", "Specimen Identifier" }, { 0x0040, 0x0552, "SQ", "Specimen Description Sequence" }, { 0x0040, 0x0553, "ST", "Specimen Description" }, { 0x0040, 0x0555, "SQ", "Acquisition Context Sequence" }, { 0x0040, 0x0556, "ST", "Acquisition Context Description" }, { 0x0040, 0x059a, "SQ", "Specimen Type Code Sequence" }, { 0x0040, 0x06fa, "LO", "Slide Identifier" }, { 0x0040, 0x071a, "SQ", "Image Center Point Coordinates Sequence" }, { 0x0040, 0x072a, "DS", "X Offset In Slide Coordinate System" }, { 0x0040, 0x073a, "DS", "Y Offset In Slide Coordinate System" }, { 0x0040, 0x074a, "DS", "Z Offset In Slide Coordinate System" }, { 0x0040, 0x08d8, "SQ", "Pixel Spacing Sequence" }, { 0x0040, 0x08da, "SQ", "Coordinate System Axis Code Sequence" }, { 0x0040, 0x08ea, "SQ", "Measurement Units Code Sequence" }, { 0x0040, 0x09f8, "SQ", "Vital Stain Code Sequence" }, { 0x0040, 0x1001, "SH", "Requested Procedure ID" }, { 0x0040, 0x1002, "LO", "Reason For Requested Procedure" }, { 0x0040, 0x1003, "SH", "Requested Procedure Priority" }, { 0x0040, 0x1004, "LO", "Patient Transport Arrangements" }, { 0x0040, 0x1005, "LO", "Requested Procedure Location" }, { 0x0040, 0x1006, "SH", "Placer Order Number of Procedure" }, { 0x0040, 0x1007, "SH", "Filler Order Number of Procedure" }, { 0x0040, 0x1008, "LO", 
"Confidentiality Code" }, { 0x0040, 0x1009, "SH", "Reporting Priority" }, { 0x0040, 0x1010, "PN", "Names of Intended Recipients of Results" }, { 0x0040, 0x1400, "LT", "Requested Procedure Comments" }, { 0x0040, 0x2001, "LO", "Reason For Imaging Service Request" }, { 0x0040, 0x2004, "DA", "Issue Date of Imaging Service Request" }, { 0x0040, 0x2005, "TM", "Issue Time of Imaging Service Request" }, { 0x0040, 0x2006, "SH", "Placer Order Number of Imaging Service Request" }, { 0x0040, 0x2007, "SH", "Filler Order Number of Imaging Service Request" }, { 0x0040, 0x2008, "PN", "Order Entered By" }, { 0x0040, 0x2009, "SH", "Order Enterer Location" }, { 0x0040, 0x2010, "SH", "Order Callback Phone Number" }, { 0x0040, 0x2400, "LT", "Imaging Service Request Comments" }, { 0x0040, 0x3001, "LO", "Confidentiality Constraint On Patient Data" }, { 0x0040, 0xa007, "CS", "Findings Flag" }, { 0x0040, 0xa020, "SQ", "Findings Sequence" }, { 0x0040, 0xa021, "UI", "Findings Group UID" }, { 0x0040, 0xa022, "UI", "Referenced Findings Group UID" }, { 0x0040, 0xa023, "DA", "Findings Group Recording Date" }, { 0x0040, 0xa024, "TM", "Findings Group Recording Time" }, { 0x0040, 0xa026, "SQ", "Findings Source Category Code Sequence" }, { 0x0040, 0xa027, "LO", "Documenting Organization" }, { 0x0040, 0xa028, "SQ", "Documenting Organization Identifier Code Sequence" }, { 0x0040, 0xa032, "LO", "History Reliability Qualifier Description" }, { 0x0040, 0xa043, "SQ", "Concept Name Code Sequence" }, { 0x0040, 0xa047, "LO", "Measurement Precision Description" }, { 0x0040, 0xa057, "CS", "Urgency or Priority Alerts" }, { 0x0040, 0xa060, "LO", "Sequencing Indicator" }, { 0x0040, 0xa066, "SQ", "Document Identifier Code Sequence" }, { 0x0040, 0xa067, "PN", "Document Author" }, { 0x0040, 0xa068, "SQ", "Document Author Identifier Code Sequence" }, { 0x0040, 0xa070, "SQ", "Identifier Code Sequence" }, { 0x0040, 0xa073, "LO", "Object String Identifier" }, { 0x0040, 0xa074, "OB", "Object Binary Identifier" }, { 
0x0040, 0xa075, "PN", "Documenting Observer" }, { 0x0040, 0xa076, "SQ", "Documenting Observer Identifier Code Sequence" }, { 0x0040, 0xa078, "SQ", "Observation Subject Identifier Code Sequence" }, { 0x0040, 0xa080, "SQ", "Person Identifier Code Sequence" }, { 0x0040, 0xa085, "SQ", "Procedure Identifier Code Sequence" }, { 0x0040, 0xa088, "LO", "Object Directory String Identifier" }, { 0x0040, 0xa089, "OB", "Object Directory Binary Identifier" }, { 0x0040, 0xa090, "CS", "History Reliability Qualifier" }, { 0x0040, 0xa0a0, "CS", "Referenced Type of Data" }, { 0x0040, 0xa0b0, "US", "Referenced Waveform Channels" }, { 0x0040, 0xa110, "DA", "Date of Document or Verbal Transaction" }, { 0x0040, 0xa112, "TM", "Time of Document Creation or Verbal Transaction" }, { 0x0040, 0xa121, "DA", "Date" }, { 0x0040, 0xa122, "TM", "Time" }, { 0x0040, 0xa123, "PN", "Person Name" }, { 0x0040, 0xa124, "SQ", "Referenced Person Sequence" }, { 0x0040, 0xa125, "CS", "Report Status ID" }, { 0x0040, 0xa130, "CS", "Temporal Range Type" }, { 0x0040, 0xa132, "UL", "Referenced Sample Offsets" }, { 0x0040, 0xa136, "US", "Referenced Frame Numbers" }, { 0x0040, 0xa138, "DS", "Referenced Time Offsets" }, { 0x0040, 0xa13a, "DT", "Referenced Datetime" }, { 0x0040, 0xa160, "UT", "Text Value" }, { 0x0040, 0xa167, "SQ", "Observation Category Code Sequence" }, { 0x0040, 0xa168, "SQ", "Concept Code Sequence" }, { 0x0040, 0xa16a, "ST", "Bibliographic Citation" }, { 0x0040, 0xa170, "CS", "Observation Class" }, { 0x0040, 0xa171, "UI", "Observation UID" }, { 0x0040, 0xa172, "UI", "Referenced Observation UID" }, { 0x0040, 0xa173, "CS", "Referenced Observation Class" }, { 0x0040, 0xa174, "CS", "Referenced Object Observation Class" }, { 0x0040, 0xa180, "US", "Annotation Group Number" }, { 0x0040, 0xa192, "DA", "Observation Date" }, { 0x0040, 0xa193, "TM", "Observation Time" }, { 0x0040, 0xa194, "CS", "Measurement Automation" }, { 0x0040, 0xa195, "SQ", "Concept Name Code Sequence Modifier" }, { 0x0040, 0xa224, "ST", 
"Identification Description" }, { 0x0040, 0xa290, "CS", "Coordinates Set Geometric Type" }, { 0x0040, 0xa296, "SQ", "Algorithm Code Sequence" }, { 0x0040, 0xa297, "ST", "Algorithm Description" }, { 0x0040, 0xa29a, "SL", "Pixel Coordinates Set" }, { 0x0040, 0xa300, "SQ", "Measured Value Sequence" }, { 0x0040, 0xa307, "PN", "Current Observer" }, { 0x0040, 0xa30a, "DS", "Numeric Value" }, { 0x0040, 0xa313, "SQ", "Referenced Accession Sequence" }, { 0x0040, 0xa33a, "ST", "Report Status Comment" }, { 0x0040, 0xa340, "SQ", "Procedure Context Sequence" }, { 0x0040, 0xa352, "PN", "Verbal Source" }, { 0x0040, 0xa353, "ST", "Address" }, { 0x0040, 0xa354, "LO", "Telephone Number" }, { 0x0040, 0xa358, "SQ", "Verbal Source Identifier Code Sequence" }, { 0x0040, 0xa380, "SQ", "Report Detail Sequence" }, { 0x0040, 0xa402, "UI", "Observation Subject UID" }, { 0x0040, 0xa403, "CS", "Observation Subject Class" }, { 0x0040, 0xa404, "SQ", "Observation Subject Type Code Sequence" }, { 0x0040, 0xa600, "CS", "Observation Subject Context Flag" }, { 0x0040, 0xa601, "CS", "Observer Context Flag" }, { 0x0040, 0xa603, "CS", "Procedure Context Flag" }, { 0x0040, 0xa730, "SQ", "Observations Sequence" }, { 0x0040, 0xa731, "SQ", "Relationship Sequence" }, { 0x0040, 0xa732, "SQ", "Relationship Type Code Sequence" }, { 0x0040, 0xa744, "SQ", "Language Code Sequence" }, { 0x0040, 0xa992, "ST", "Uniform Resource Locator" }, { 0x0040, 0xb020, "SQ", "Annotation Sequence" }, { 0x0040, 0xdb73, "SQ", "Relationship Type Code Sequence Modifier" }, { 0x0041, 0x0000, "LT", "Papyrus Comments" }, { 0x0041, 0x0010, "xs", "?" }, { 0x0041, 0x0011, "xs", "?" }, { 0x0041, 0x0012, "UL", "Pixel Offset" }, { 0x0041, 0x0013, "SQ", "Image Identifier Sequence" }, { 0x0041, 0x0014, "SQ", "External File Reference Sequence" }, { 0x0041, 0x0015, "US", "Number of Images" }, { 0x0041, 0x0020, "xs", "?" 
}, { 0x0041, 0x0021, "UI", "Referenced SOP Class UID" }, { 0x0041, 0x0022, "UI", "Referenced SOP Instance UID" }, { 0x0041, 0x0030, "xs", "?" }, { 0x0041, 0x0031, "xs", "?" }, { 0x0041, 0x0032, "xs", "?" }, { 0x0041, 0x0034, "DA", "Modified Date" }, { 0x0041, 0x0036, "TM", "Modified Time" }, { 0x0041, 0x0040, "LT", "Owner Name" }, { 0x0041, 0x0041, "UI", "Referenced Image SOP Class UID" }, { 0x0041, 0x0042, "UI", "Referenced Image SOP Instance UID" }, { 0x0041, 0x0050, "xs", "?" }, { 0x0041, 0x0060, "UL", "Number of Images" }, { 0x0041, 0x0062, "UL", "Number of Other" }, { 0x0041, 0x00a0, "LT", "External Folder Element DSID" }, { 0x0041, 0x00a1, "US", "External Folder Element Data Set Type" }, { 0x0041, 0x00a2, "LT", "External Folder Element File Location" }, { 0x0041, 0x00a3, "UL", "External Folder Element Length" }, { 0x0041, 0x00b0, "LT", "Internal Folder Element DSID" }, { 0x0041, 0x00b1, "US", "Internal Folder Element Data Set Type" }, { 0x0041, 0x00b2, "UL", "Internal Offset To Data Set" }, { 0x0041, 0x00b3, "UL", "Internal Offset To Image" }, { 0x0043, 0x0001, "SS", "Bitmap Of Prescan Options" }, { 0x0043, 0x0002, "SS", "Gradient Offset In X" }, { 0x0043, 0x0003, "SS", "Gradient Offset In Y" }, { 0x0043, 0x0004, "SS", "Gradient Offset In Z" }, { 0x0043, 0x0005, "SS", "Image Is Original Or Unoriginal" }, { 0x0043, 0x0006, "SS", "Number Of EPI Shots" }, { 0x0043, 0x0007, "SS", "Views Per Segment" }, { 0x0043, 0x0008, "SS", "Respiratory Rate In BPM" }, { 0x0043, 0x0009, "SS", "Respiratory Trigger Point" }, { 0x0043, 0x000a, "SS", "Type Of Receiver Used" }, { 0x0043, 0x000b, "DS", "Peak Rate Of Change Of Gradient Field" }, { 0x0043, 0x000c, "DS", "Limits In Units Of Percent" }, { 0x0043, 0x000d, "DS", "PSD Estimated Limit" }, { 0x0043, 0x000e, "DS", "PSD Estimated Limit In Tesla Per Second" }, { 0x0043, 0x000f, "DS", "SAR Avg Head" }, { 0x0043, 0x0010, "US", "Window Value" }, { 0x0043, 0x0011, "US", "Total Input Views" }, { 0x0043, 0x0012, "SS", "Xray Chain" }, 
{ 0x0043, 0x0013, "SS", "Recon Kernel Parameters" }, { 0x0043, 0x0014, "SS", "Calibration Parameters" }, { 0x0043, 0x0015, "SS", "Total Output Views" }, { 0x0043, 0x0016, "SS", "Number Of Overranges" }, { 0x0043, 0x0017, "DS", "IBH Image Scale Factors" }, { 0x0043, 0x0018, "DS", "BBH Coefficients" }, { 0x0043, 0x0019, "SS", "Number Of BBH Chains To Blend" }, { 0x0043, 0x001a, "SL", "Starting Channel Number" }, { 0x0043, 0x001b, "SS", "PPScan Parameters" }, { 0x0043, 0x001c, "SS", "GE Image Integrity" }, { 0x0043, 0x001d, "SS", "Level Value" }, { 0x0043, 0x001e, "xs", "?" }, { 0x0043, 0x001f, "SL", "Max Overranges In A View" }, { 0x0043, 0x0020, "DS", "Avg Overranges All Views" }, { 0x0043, 0x0021, "SS", "Corrected Afterglow Terms" }, { 0x0043, 0x0025, "SS", "Reference Channels" }, { 0x0043, 0x0026, "US", "No Views Ref Channels Blocked" }, { 0x0043, 0x0027, "xs", "?" }, { 0x0043, 0x0028, "OB", "Unique Image Identifier" }, { 0x0043, 0x0029, "OB", "Histogram Tables" }, { 0x0043, 0x002a, "OB", "User Defined Data" }, { 0x0043, 0x002b, "SS", "Private Scan Options" }, { 0x0043, 0x002c, "SS", "Effective Echo Spacing" }, { 0x0043, 0x002d, "SH", "String Slop Field 1" }, { 0x0043, 0x002e, "SH", "String Slop Field 2" }, { 0x0043, 0x002f, "SS", "Raw Data Type" }, { 0x0043, 0x0030, "SS", "Raw Data Type" }, { 0x0043, 0x0031, "DS", "RA Coord Of Target Recon Centre" }, { 0x0043, 0x0032, "SS", "Raw Data Type" }, { 0x0043, 0x0033, "FL", "Neg Scan Spacing" }, { 0x0043, 0x0034, "IS", "Offset Frequency" }, { 0x0043, 0x0035, "UL", "User Usage Tag" }, { 0x0043, 0x0036, "UL", "User Fill Map MSW" }, { 0x0043, 0x0037, "UL", "User Fill Map LSW" }, { 0x0043, 0x0038, "FL", "User 25 To User 48" }, { 0x0043, 0x0039, "IS", "Slop Integer 6 To Slop Integer 9" }, { 0x0043, 0x0040, "FL", "Trigger On Position" }, { 0x0043, 0x0041, "FL", "Degree Of Rotation" }, { 0x0043, 0x0042, "SL", "DAS Trigger Source" }, { 0x0043, 0x0043, "SL", "DAS Fpa Gain" }, { 0x0043, 0x0044, "SL", "DAS Output Source" }, { 
0x0043, 0x0045, "SL", "DAS Ad Input" }, { 0x0043, 0x0046, "SL", "DAS Cal Mode" }, { 0x0043, 0x0047, "SL", "DAS Cal Frequency" }, { 0x0043, 0x0048, "SL", "DAS Reg Xm" }, { 0x0043, 0x0049, "SL", "DAS Auto Zero" }, { 0x0043, 0x004a, "SS", "Starting Channel Of View" }, { 0x0043, 0x004b, "SL", "DAS Xm Pattern" }, { 0x0043, 0x004c, "SS", "TGGC Trigger Mode" }, { 0x0043, 0x004d, "FL", "Start Scan To Xray On Delay" }, { 0x0043, 0x004e, "FL", "Duration Of Xray On" }, { 0x0044, 0x0000, "UI", "?" }, { 0x0045, 0x0004, "CS", "AES" }, { 0x0045, 0x0006, "DS", "Angulation" }, { 0x0045, 0x0009, "DS", "Real Magnification Factor" }, { 0x0045, 0x000b, "CS", "Senograph Type" }, { 0x0045, 0x000c, "DS", "Integration Time" }, { 0x0045, 0x000d, "DS", "ROI Origin X and Y" }, { 0x0045, 0x0011, "DS", "Receptor Size cm X and Y" }, { 0x0045, 0x0012, "IS", "Receptor Size Pixels X and Y" }, { 0x0045, 0x0013, "ST", "Screen" }, { 0x0045, 0x0014, "DS", "Pixel Pitch Microns" }, { 0x0045, 0x0015, "IS", "Pixel Depth Bits" }, { 0x0045, 0x0016, "IS", "Binning Factor X and Y" }, { 0x0045, 0x001b, "CS", "Clinical View" }, { 0x0045, 0x001d, "DS", "Mean Of Raw Gray Levels" }, { 0x0045, 0x001e, "DS", "Mean Of Offset Gray Levels" }, { 0x0045, 0x001f, "DS", "Mean Of Corrected Gray Levels" }, { 0x0045, 0x0020, "DS", "Mean Of Region Gray Levels" }, { 0x0045, 0x0021, "DS", "Mean Of Log Region Gray Levels" }, { 0x0045, 0x0022, "DS", "Standard Deviation Of Raw Gray Levels" }, { 0x0045, 0x0023, "DS", "Standard Deviation Of Corrected Gray Levels" }, { 0x0045, 0x0024, "DS", "Standard Deviation Of Region Gray Levels" }, { 0x0045, 0x0025, "DS", "Standard Deviation Of Log Region Gray Levels" }, { 0x0045, 0x0026, "OB", "MAO Buffer" }, { 0x0045, 0x0027, "IS", "Set Number" }, { 0x0045, 0x0028, "CS", "WindowingType (LINEAR or GAMMA)" }, { 0x0045, 0x0029, "DS", "WindowingParameters" }, { 0x0045, 0x002a, "IS", "Crosshair Cursor X Coordinates" }, { 0x0045, 0x002b, "IS", "Crosshair Cursor Y Coordinates" }, { 0x0045, 0x0039, "US", 
"Vignette Rows" }, { 0x0045, 0x003a, "US", "Vignette Columns" }, { 0x0045, 0x003b, "US", "Vignette Bits Allocated" }, { 0x0045, 0x003c, "US", "Vignette Bits Stored" }, { 0x0045, 0x003d, "US", "Vignette High Bit" }, { 0x0045, 0x003e, "US", "Vignette Pixel Representation" }, { 0x0045, 0x003f, "OB", "Vignette Pixel Data" }, { 0x0047, 0x0001, "SQ", "Reconstruction Parameters Sequence" }, { 0x0047, 0x0050, "UL", "Volume Voxel Count" }, { 0x0047, 0x0051, "UL", "Volume Segment Count" }, { 0x0047, 0x0053, "US", "Volume Slice Size" }, { 0x0047, 0x0054, "US", "Volume Slice Count" }, { 0x0047, 0x0055, "SL", "Volume Threshold Value" }, { 0x0047, 0x0057, "DS", "Volume Voxel Ratio" }, { 0x0047, 0x0058, "DS", "Volume Voxel Size" }, { 0x0047, 0x0059, "US", "Volume Z Position Size" }, { 0x0047, 0x0060, "DS", "Volume Base Line" }, { 0x0047, 0x0061, "DS", "Volume Center Point" }, { 0x0047, 0x0063, "SL", "Volume Skew Base" }, { 0x0047, 0x0064, "DS", "Volume Registration Transform Rotation Matrix" }, { 0x0047, 0x0065, "DS", "Volume Registration Transform Translation Vector" }, { 0x0047, 0x0070, "DS", "KVP List" }, { 0x0047, 0x0071, "IS", "XRay Tube Current List" }, { 0x0047, 0x0072, "IS", "Exposure List" }, { 0x0047, 0x0080, "LO", "Acquisition DLX Identifier" }, { 0x0047, 0x0085, "SQ", "Acquisition DLX 2D Series Sequence" }, { 0x0047, 0x0089, "DS", "Contrast Agent Volume List" }, { 0x0047, 0x008a, "US", "Number Of Injections" }, { 0x0047, 0x008b, "US", "Frame Count" }, { 0x0047, 0x0096, "IS", "Used Frames" }, { 0x0047, 0x0091, "LO", "XA 3D Reconstruction Algorithm Name" }, { 0x0047, 0x0092, "CS", "XA 3D Reconstruction Algorithm Version" }, { 0x0047, 0x0093, "DA", "DLX Calibration Date" }, { 0x0047, 0x0094, "TM", "DLX Calibration Time" }, { 0x0047, 0x0095, "CS", "DLX Calibration Status" }, { 0x0047, 0x0098, "US", "Transform Count" }, { 0x0047, 0x0099, "SQ", "Transform Sequence" }, { 0x0047, 0x009a, "DS", "Transform Rotation Matrix" }, { 0x0047, 0x009b, "DS", "Transform Translation 
Vector" }, { 0x0047, 0x009c, "LO", "Transform Label" }, { 0x0047, 0x00b1, "US", "Wireframe Count" }, { 0x0047, 0x00b2, "US", "Location System" }, { 0x0047, 0x00b0, "SQ", "Wireframe List" }, { 0x0047, 0x00b5, "LO", "Wireframe Name" }, { 0x0047, 0x00b6, "LO", "Wireframe Group Name" }, { 0x0047, 0x00b7, "LO", "Wireframe Color" }, { 0x0047, 0x00b8, "SL", "Wireframe Attributes" }, { 0x0047, 0x00b9, "SL", "Wireframe Point Count" }, { 0x0047, 0x00ba, "SL", "Wireframe Timestamp" }, { 0x0047, 0x00bb, "SQ", "Wireframe Point List" }, { 0x0047, 0x00bc, "DS", "Wireframe Points Coordinates" }, { 0x0047, 0x00c0, "DS", "Volume Upper Left High Corner RAS" }, { 0x0047, 0x00c1, "DS", "Volume Slice To RAS Rotation Matrix" }, { 0x0047, 0x00c2, "DS", "Volume Upper Left High Corner TLOC" }, { 0x0047, 0x00d1, "OB", "Volume Segment List" }, { 0x0047, 0x00d2, "OB", "Volume Gradient List" }, { 0x0047, 0x00d3, "OB", "Volume Density List" }, { 0x0047, 0x00d4, "OB", "Volume Z Position List" }, { 0x0047, 0x00d5, "OB", "Volume Original Index List" }, { 0x0050, 0x0000, "UL", "Calibration Group Length" }, { 0x0050, 0x0004, "CS", "Calibration Object" }, { 0x0050, 0x0010, "SQ", "DeviceSequence" }, { 0x0050, 0x0014, "DS", "DeviceLength" }, { 0x0050, 0x0016, "DS", "DeviceDiameter" }, { 0x0050, 0x0017, "CS", "DeviceDiameterUnits" }, { 0x0050, 0x0018, "DS", "DeviceVolume" }, { 0x0050, 0x0019, "DS", "InterMarkerDistance" }, { 0x0050, 0x0020, "LO", "DeviceDescription" }, { 0x0050, 0x0030, "SQ", "CodedInterventionDeviceSequence" }, { 0x0051, 0x0010, "xs", "Image Text" }, { 0x0054, 0x0000, "UL", "Nuclear Acquisition Group Length" }, { 0x0054, 0x0010, "US", "Energy Window Vector" }, { 0x0054, 0x0011, "US", "Number of Energy Windows" }, { 0x0054, 0x0012, "SQ", "Energy Window Information Sequence" }, { 0x0054, 0x0013, "SQ", "Energy Window Range Sequence" }, { 0x0054, 0x0014, "DS", "Energy Window Lower Limit" }, { 0x0054, 0x0015, "DS", "Energy Window Upper Limit" }, { 0x0054, 0x0016, "SQ", "Radiopharmaceutical 
Information Sequence" }, { 0x0054, 0x0017, "IS", "Residual Syringe Counts" }, { 0x0054, 0x0018, "SH", "Energy Window Name" }, { 0x0054, 0x0020, "US", "Detector Vector" }, { 0x0054, 0x0021, "US", "Number of Detectors" }, { 0x0054, 0x0022, "SQ", "Detector Information Sequence" }, { 0x0054, 0x0030, "US", "Phase Vector" }, { 0x0054, 0x0031, "US", "Number of Phases" }, { 0x0054, 0x0032, "SQ", "Phase Information Sequence" }, { 0x0054, 0x0033, "US", "Number of Frames In Phase" }, { 0x0054, 0x0036, "IS", "Phase Delay" }, { 0x0054, 0x0038, "IS", "Pause Between Frames" }, { 0x0054, 0x0050, "US", "Rotation Vector" }, { 0x0054, 0x0051, "US", "Number of Rotations" }, { 0x0054, 0x0052, "SQ", "Rotation Information Sequence" }, { 0x0054, 0x0053, "US", "Number of Frames In Rotation" }, { 0x0054, 0x0060, "US", "R-R Interval Vector" }, { 0x0054, 0x0061, "US", "Number of R-R Intervals" }, { 0x0054, 0x0062, "SQ", "Gated Information Sequence" }, { 0x0054, 0x0063, "SQ", "Data Information Sequence" }, { 0x0054, 0x0070, "US", "Time Slot Vector" }, { 0x0054, 0x0071, "US", "Number of Time Slots" }, { 0x0054, 0x0072, "SQ", "Time Slot Information Sequence" }, { 0x0054, 0x0073, "DS", "Time Slot Time" }, { 0x0054, 0x0080, "US", "Slice Vector" }, { 0x0054, 0x0081, "US", "Number of Slices" }, { 0x0054, 0x0090, "US", "Angular View Vector" }, { 0x0054, 0x0100, "US", "Time Slice Vector" }, { 0x0054, 0x0101, "US", "Number Of Time Slices" }, { 0x0054, 0x0200, "DS", "Start Angle" }, { 0x0054, 0x0202, "CS", "Type of Detector Motion" }, { 0x0054, 0x0210, "IS", "Trigger Vector" }, { 0x0054, 0x0211, "US", "Number of Triggers in Phase" }, { 0x0054, 0x0220, "SQ", "View Code Sequence" }, { 0x0054, 0x0222, "SQ", "View Modifier Code Sequence" }, { 0x0054, 0x0300, "SQ", "Radionuclide Code Sequence" }, { 0x0054, 0x0302, "SQ", "Radiopharmaceutical Route Code Sequence" }, { 0x0054, 0x0304, "SQ", "Radiopharmaceutical Code Sequence" }, { 0x0054, 0x0306, "SQ", "Calibration Data Sequence" }, { 0x0054, 0x0308, "US", 
"Energy Window Number" }, { 0x0054, 0x0400, "SH", "Image ID" }, { 0x0054, 0x0410, "SQ", "Patient Orientation Code Sequence" }, { 0x0054, 0x0412, "SQ", "Patient Orientation Modifier Code Sequence" }, { 0x0054, 0x0414, "SQ", "Patient Gantry Relationship Code Sequence" }, { 0x0054, 0x1000, "CS", "Positron Emission Tomography Series Type" }, { 0x0054, 0x1001, "CS", "Positron Emission Tomography Units" }, { 0x0054, 0x1002, "CS", "Counts Source" }, { 0x0054, 0x1004, "CS", "Reprojection Method" }, { 0x0054, 0x1100, "CS", "Randoms Correction Method" }, { 0x0054, 0x1101, "LO", "Attenuation Correction Method" }, { 0x0054, 0x1102, "CS", "Decay Correction" }, { 0x0054, 0x1103, "LO", "Reconstruction Method" }, { 0x0054, 0x1104, "LO", "Detector Lines of Response Used" }, { 0x0054, 0x1105, "LO", "Scatter Correction Method" }, { 0x0054, 0x1200, "DS", "Axial Acceptance" }, { 0x0054, 0x1201, "IS", "Axial Mash" }, { 0x0054, 0x1202, "IS", "Transverse Mash" }, { 0x0054, 0x1203, "DS", "Detector Element Size" }, { 0x0054, 0x1210, "DS", "Coincidence Window Width" }, { 0x0054, 0x1220, "CS", "Secondary Counts Type" }, { 0x0054, 0x1300, "DS", "Frame Reference Time" }, { 0x0054, 0x1310, "IS", "Primary Prompts Counts Accumulated" }, { 0x0054, 0x1311, "IS", "Secondary Counts Accumulated" }, { 0x0054, 0x1320, "DS", "Slice Sensitivity Factor" }, { 0x0054, 0x1321, "DS", "Decay Factor" }, { 0x0054, 0x1322, "DS", "Dose Calibration Factor" }, { 0x0054, 0x1323, "DS", "Scatter Fraction Factor" }, { 0x0054, 0x1324, "DS", "Dead Time Factor" }, { 0x0054, 0x1330, "US", "Image Index" }, { 0x0054, 0x1400, "CS", "Counts Included" }, { 0x0054, 0x1401, "CS", "Dead Time Correction Flag" }, { 0x0055, 0x0046, "LT", "Current Ward" }, { 0x0058, 0x0000, "SQ", "?" 
}, { 0x0060, 0x3000, "SQ", "Histogram Sequence" }, { 0x0060, 0x3002, "US", "Histogram Number of Bins" }, { 0x0060, 0x3004, "xs", "Histogram First Bin Value" }, { 0x0060, 0x3006, "xs", "Histogram Last Bin Value" }, { 0x0060, 0x3008, "US", "Histogram Bin Width" }, { 0x0060, 0x3010, "LO", "Histogram Explanation" }, { 0x0060, 0x3020, "UL", "Histogram Data" }, { 0x0070, 0x0001, "SQ", "Graphic Annotation Sequence" }, { 0x0070, 0x0002, "CS", "Graphic Layer" }, { 0x0070, 0x0003, "CS", "Bounding Box Annotation Units" }, { 0x0070, 0x0004, "CS", "Anchor Point Annotation Units" }, { 0x0070, 0x0005, "CS", "Graphic Annotation Units" }, { 0x0070, 0x0006, "ST", "Unformatted Text Value" }, { 0x0070, 0x0008, "SQ", "Text Object Sequence" }, { 0x0070, 0x0009, "SQ", "Graphic Object Sequence" }, { 0x0070, 0x0010, "FL", "Bounding Box TLHC" }, { 0x0070, 0x0011, "FL", "Bounding Box BRHC" }, { 0x0070, 0x0014, "FL", "Anchor Point" }, { 0x0070, 0x0015, "CS", "Anchor Point Visibility" }, { 0x0070, 0x0020, "US", "Graphic Dimensions" }, { 0x0070, 0x0021, "US", "Number Of Graphic Points" }, { 0x0070, 0x0022, "FL", "Graphic Data" }, { 0x0070, 0x0023, "CS", "Graphic Type" }, { 0x0070, 0x0024, "CS", "Graphic Filled" }, { 0x0070, 0x0040, "IS", "Image Rotation" }, { 0x0070, 0x0041, "CS", "Image Horizontal Flip" }, { 0x0070, 0x0050, "US", "Displayed Area TLHC" }, { 0x0070, 0x0051, "US", "Displayed Area BRHC" }, { 0x0070, 0x0060, "SQ", "Graphic Layer Sequence" }, { 0x0070, 0x0062, "IS", "Graphic Layer Order" }, { 0x0070, 0x0066, "US", "Graphic Layer Recommended Display Value" }, { 0x0070, 0x0068, "LO", "Graphic Layer Description" }, { 0x0070, 0x0080, "CS", "Presentation Label" }, { 0x0070, 0x0081, "LO", "Presentation Description" }, { 0x0070, 0x0082, "DA", "Presentation Creation Date" }, { 0x0070, 0x0083, "TM", "Presentation Creation Time" }, { 0x0070, 0x0084, "PN", "Presentation Creator's Name" }, { 0x0070, 0x031a, "UI", "Fiducial UID" }, { 0x0087, 0x0010, "CS", "Media Type" }, { 0x0087, 0x0020, "CS", 
"Media Location" }, { 0x0087, 0x0050, "IS", "Estimated Retrieve Time" }, { 0x0088, 0x0000, "UL", "Storage Group Length" }, { 0x0088, 0x0130, "SH", "Storage Media FileSet ID" }, { 0x0088, 0x0140, "UI", "Storage Media FileSet UID" }, { 0x0088, 0x0200, "SQ", "Icon Image Sequence" }, { 0x0088, 0x0904, "LO", "Topic Title" }, { 0x0088, 0x0906, "ST", "Topic Subject" }, { 0x0088, 0x0910, "LO", "Topic Author" }, { 0x0088, 0x0912, "LO", "Topic Key Words" }, { 0x0095, 0x0001, "LT", "Examination Folder ID" }, { 0x0095, 0x0004, "UL", "Folder Reported Status" }, { 0x0095, 0x0005, "LT", "Folder Reporting Radiologist" }, { 0x0095, 0x0007, "LT", "SIENET ISA PLA" }, { 0x0099, 0x0002, "UL", "Data Object Attributes" }, { 0x00e1, 0x0001, "US", "Data Dictionary Version" }, { 0x00e1, 0x0014, "LT", "?" }, { 0x00e1, 0x0022, "DS", "?" }, { 0x00e1, 0x0023, "DS", "?" }, { 0x00e1, 0x0024, "LT", "?" }, { 0x00e1, 0x0025, "LT", "?" }, { 0x00e1, 0x0040, "SH", "Offset From CT MR Images" }, { 0x0193, 0x0002, "DS", "RIS Key" }, { 0x0307, 0x0001, "UN", "RIS Worklist IMGEF" }, { 0x0309, 0x0001, "UN", "RIS Report IMGEF" }, { 0x0601, 0x0000, "SH", "Implementation Version" }, { 0x0601, 0x0020, "DS", "Relative Table Position" }, { 0x0601, 0x0021, "DS", "Relative Table Height" }, { 0x0601, 0x0030, "SH", "Surview Direction" }, { 0x0601, 0x0031, "DS", "Surview Length" }, { 0x0601, 0x0050, "SH", "Image View Type" }, { 0x0601, 0x0070, "DS", "Batch Number" }, { 0x0601, 0x0071, "DS", "Batch Size" }, { 0x0601, 0x0072, "DS", "Batch Slice Number" }, { 0x1000, 0x0000, "xs", "?" }, { 0x1000, 0x0001, "US", "Run Length Triplet" }, { 0x1000, 0x0002, "US", "Huffman Table Size" }, { 0x1000, 0x0003, "US", "Huffman Table Triplet" }, { 0x1000, 0x0004, "US", "Shift Table Size" }, { 0x1000, 0x0005, "US", "Shift Table Triplet" }, { 0x1010, 0x0000, "xs", "?" }, { 0x1369, 0x0000, "US", "?" 
}, { 0x2000, 0x0000, "UL", "Film Session Group Length" }, { 0x2000, 0x0010, "IS", "Number of Copies" }, { 0x2000, 0x0020, "CS", "Print Priority" }, { 0x2000, 0x0030, "CS", "Medium Type" }, { 0x2000, 0x0040, "CS", "Film Destination" }, { 0x2000, 0x0050, "LO", "Film Session Label" }, { 0x2000, 0x0060, "IS", "Memory Allocation" }, { 0x2000, 0x0500, "SQ", "Referenced Film Box Sequence" }, { 0x2010, 0x0000, "UL", "Film Box Group Length" }, { 0x2010, 0x0010, "ST", "Image Display Format" }, { 0x2010, 0x0030, "CS", "Annotation Display Format ID" }, { 0x2010, 0x0040, "CS", "Film Orientation" }, { 0x2010, 0x0050, "CS", "Film Size ID" }, { 0x2010, 0x0060, "CS", "Magnification Type" }, { 0x2010, 0x0080, "CS", "Smoothing Type" }, { 0x2010, 0x0100, "CS", "Border Density" }, { 0x2010, 0x0110, "CS", "Empty Image Density" }, { 0x2010, 0x0120, "US", "Min Density" }, { 0x2010, 0x0130, "US", "Max Density" }, { 0x2010, 0x0140, "CS", "Trim" }, { 0x2010, 0x0150, "ST", "Configuration Information" }, { 0x2010, 0x0500, "SQ", "Referenced Film Session Sequence" }, { 0x2010, 0x0510, "SQ", "Referenced Image Box Sequence" }, { 0x2010, 0x0520, "SQ", "Referenced Basic Annotation Box Sequence" }, { 0x2020, 0x0000, "UL", "Image Box Group Length" }, { 0x2020, 0x0010, "US", "Image Box Position" }, { 0x2020, 0x0020, "CS", "Polarity" }, { 0x2020, 0x0030, "DS", "Requested Image Size" }, { 0x2020, 0x0110, "SQ", "Preformatted Grayscale Image Sequence" }, { 0x2020, 0x0111, "SQ", "Preformatted Color Image Sequence" }, { 0x2020, 0x0130, "SQ", "Referenced Image Overlay Box Sequence" }, { 0x2020, 0x0140, "SQ", "Referenced VOI LUT Box Sequence" }, { 0x2030, 0x0000, "UL", "Annotation Group Length" }, { 0x2030, 0x0010, "US", "Annotation Position" }, { 0x2030, 0x0020, "LO", "Text String" }, { 0x2040, 0x0000, "UL", "Overlay Box Group Length" }, { 0x2040, 0x0010, "SQ", "Referenced Overlay Plane Sequence" }, { 0x2040, 0x0011, "US", "Referenced Overlay Plane Groups" }, { 0x2040, 0x0060, "CS", "Overlay Magnification 
Type" }, { 0x2040, 0x0070, "CS", "Overlay Smoothing Type" }, { 0x2040, 0x0080, "CS", "Overlay Foreground Density" }, { 0x2040, 0x0090, "CS", "Overlay Mode" }, { 0x2040, 0x0100, "CS", "Threshold Density" }, { 0x2040, 0x0500, "SQ", "Referenced Overlay Image Box Sequence" }, { 0x2050, 0x0010, "SQ", "Presentation LUT Sequence" }, { 0x2050, 0x0020, "CS", "Presentation LUT Shape" }, { 0x2100, 0x0000, "UL", "Print Job Group Length" }, { 0x2100, 0x0020, "CS", "Execution Status" }, { 0x2100, 0x0030, "CS", "Execution Status Info" }, { 0x2100, 0x0040, "DA", "Creation Date" }, { 0x2100, 0x0050, "TM", "Creation Time" }, { 0x2100, 0x0070, "AE", "Originator" }, { 0x2100, 0x0500, "SQ", "Referenced Print Job Sequence" }, { 0x2110, 0x0000, "UL", "Printer Group Length" }, { 0x2110, 0x0010, "CS", "Printer Status" }, { 0x2110, 0x0020, "CS", "Printer Status Info" }, { 0x2110, 0x0030, "LO", "Printer Name" }, { 0x2110, 0x0099, "SH", "Print Queue ID" }, { 0x3002, 0x0002, "SH", "RT Image Label" }, { 0x3002, 0x0003, "LO", "RT Image Name" }, { 0x3002, 0x0004, "ST", "RT Image Description" }, { 0x3002, 0x000a, "CS", "Reported Values Origin" }, { 0x3002, 0x000c, "CS", "RT Image Plane" }, { 0x3002, 0x000e, "DS", "X-Ray Image Receptor Angle" }, { 0x3002, 0x0010, "DS", "RTImageOrientation" }, { 0x3002, 0x0011, "DS", "Image Plane Pixel Spacing" }, { 0x3002, 0x0012, "DS", "RT Image Position" }, { 0x3002, 0x0020, "SH", "Radiation Machine Name" }, { 0x3002, 0x0022, "DS", "Radiation Machine SAD" }, { 0x3002, 0x0024, "DS", "Radiation Machine SSD" }, { 0x3002, 0x0026, "DS", "RT Image SID" }, { 0x3002, 0x0028, "DS", "Source to Reference Object Distance" }, { 0x3002, 0x0029, "IS", "Fraction Number" }, { 0x3002, 0x0030, "SQ", "Exposure Sequence" }, { 0x3002, 0x0032, "DS", "Meterset Exposure" }, { 0x3004, 0x0001, "CS", "DVH Type" }, { 0x3004, 0x0002, "CS", "Dose Units" }, { 0x3004, 0x0004, "CS", "Dose Type" }, { 0x3004, 0x0006, "LO", "Dose Comment" }, { 0x3004, 0x0008, "DS", "Normalization Point" }, { 0x3004, 
0x000a, "CS", "Dose Summation Type" }, { 0x3004, 0x000c, "DS", "GridFrame Offset Vector" }, { 0x3004, 0x000e, "DS", "Dose Grid Scaling" }, { 0x3004, 0x0010, "SQ", "RT Dose ROI Sequence" }, { 0x3004, 0x0012, "DS", "Dose Value" }, { 0x3004, 0x0040, "DS", "DVH Normalization Point" }, { 0x3004, 0x0042, "DS", "DVH Normalization Dose Value" }, { 0x3004, 0x0050, "SQ", "DVH Sequence" }, { 0x3004, 0x0052, "DS", "DVH Dose Scaling" }, { 0x3004, 0x0054, "CS", "DVH Volume Units" }, { 0x3004, 0x0056, "IS", "DVH Number of Bins" }, { 0x3004, 0x0058, "DS", "DVH Data" }, { 0x3004, 0x0060, "SQ", "DVH Referenced ROI Sequence" }, { 0x3004, 0x0062, "CS", "DVH ROI Contribution Type" }, { 0x3004, 0x0070, "DS", "DVH Minimum Dose" }, { 0x3004, 0x0072, "DS", "DVH Maximum Dose" }, { 0x3004, 0x0074, "DS", "DVH Mean Dose" }, { 0x3006, 0x0002, "SH", "Structure Set Label" }, { 0x3006, 0x0004, "LO", "Structure Set Name" }, { 0x3006, 0x0006, "ST", "Structure Set Description" }, { 0x3006, 0x0008, "DA", "Structure Set Date" }, { 0x3006, 0x0009, "TM", "Structure Set Time" }, { 0x3006, 0x0010, "SQ", "Referenced Frame of Reference Sequence" }, { 0x3006, 0x0012, "SQ", "RT Referenced Study Sequence" }, { 0x3006, 0x0014, "SQ", "RT Referenced Series Sequence" }, { 0x3006, 0x0016, "SQ", "Contour Image Sequence" }, { 0x3006, 0x0020, "SQ", "Structure Set ROI Sequence" }, { 0x3006, 0x0022, "IS", "ROI Number" }, { 0x3006, 0x0024, "UI", "Referenced Frame of Reference UID" }, { 0x3006, 0x0026, "LO", "ROI Name" }, { 0x3006, 0x0028, "ST", "ROI Description" }, { 0x3006, 0x002a, "IS", "ROI Display Color" }, { 0x3006, 0x002c, "DS", "ROI Volume" }, { 0x3006, 0x0030, "SQ", "RT Related ROI Sequence" }, { 0x3006, 0x0033, "CS", "RT ROI Relationship" }, { 0x3006, 0x0036, "CS", "ROI Generation Algorithm" }, { 0x3006, 0x0038, "LO", "ROI Generation Description" }, { 0x3006, 0x0039, "SQ", "ROI Contour Sequence" }, { 0x3006, 0x0040, "SQ", "Contour Sequence" }, { 0x3006, 0x0042, "CS", "Contour Geometric Type" }, { 0x3006, 0x0044, 
"DS", "Contour SlabT hickness" }, { 0x3006, 0x0045, "DS", "Contour Offset Vector" }, { 0x3006, 0x0046, "IS", "Number of Contour Points" }, { 0x3006, 0x0050, "DS", "Contour Data" }, { 0x3006, 0x0080, "SQ", "RT ROI Observations Sequence" }, { 0x3006, 0x0082, "IS", "Observation Number" }, { 0x3006, 0x0084, "IS", "Referenced ROI Number" }, { 0x3006, 0x0085, "SH", "ROI Observation Label" }, { 0x3006, 0x0086, "SQ", "RT ROI Identification Code Sequence" }, { 0x3006, 0x0088, "ST", "ROI Observation Description" }, { 0x3006, 0x00a0, "SQ", "Related RT ROI Observations Sequence" }, { 0x3006, 0x00a4, "CS", "RT ROI Interpreted Type" }, { 0x3006, 0x00a6, "PN", "ROI Interpreter" }, { 0x3006, 0x00b0, "SQ", "ROI Physical Properties Sequence" }, { 0x3006, 0x00b2, "CS", "ROI Physical Property" }, { 0x3006, 0x00b4, "DS", "ROI Physical Property Value" }, { 0x3006, 0x00c0, "SQ", "Frame of Reference Relationship Sequence" }, { 0x3006, 0x00c2, "UI", "Related Frame of Reference UID" }, { 0x3006, 0x00c4, "CS", "Frame of Reference Transformation Type" }, { 0x3006, 0x00c6, "DS", "Frame of Reference Transformation Matrix" }, { 0x3006, 0x00c8, "LO", "Frame of Reference Transformation Comment" }, { 0x300a, 0x0002, "SH", "RT Plan Label" }, { 0x300a, 0x0003, "LO", "RT Plan Name" }, { 0x300a, 0x0004, "ST", "RT Plan Description" }, { 0x300a, 0x0006, "DA", "RT Plan Date" }, { 0x300a, 0x0007, "TM", "RT Plan Time" }, { 0x300a, 0x0009, "LO", "Treatment Protocols" }, { 0x300a, 0x000a, "CS", "Treatment Intent" }, { 0x300a, 0x000b, "LO", "Treatment Sites" }, { 0x300a, 0x000c, "CS", "RT Plan Geometry" }, { 0x300a, 0x000e, "ST", "Prescription Description" }, { 0x300a, 0x0010, "SQ", "Dose ReferenceSequence" }, { 0x300a, 0x0012, "IS", "Dose ReferenceNumber" }, { 0x300a, 0x0014, "CS", "Dose Reference Structure Type" }, { 0x300a, 0x0016, "LO", "Dose ReferenceDescription" }, { 0x300a, 0x0018, "DS", "Dose Reference Point Coordinates" }, { 0x300a, 0x001a, "DS", "Nominal Prior Dose" }, { 0x300a, 0x0020, "CS", "Dose 
Reference Type" }, { 0x300a, 0x0021, "DS", "Constraint Weight" }, { 0x300a, 0x0022, "DS", "Delivery Warning Dose" }, { 0x300a, 0x0023, "DS", "Delivery Maximum Dose" }, { 0x300a, 0x0025, "DS", "Target Minimum Dose" }, { 0x300a, 0x0026, "DS", "Target Prescription Dose" }, { 0x300a, 0x0027, "DS", "Target Maximum Dose" }, { 0x300a, 0x0028, "DS", "Target Underdose Volume Fraction" }, { 0x300a, 0x002a, "DS", "Organ at Risk Full-volume Dose" }, { 0x300a, 0x002b, "DS", "Organ at Risk Limit Dose" }, { 0x300a, 0x002c, "DS", "Organ at Risk Maximum Dose" }, { 0x300a, 0x002d, "DS", "Organ at Risk Overdose Volume Fraction" }, { 0x300a, 0x0040, "SQ", "Tolerance Table Sequence" }, { 0x300a, 0x0042, "IS", "Tolerance Table Number" }, { 0x300a, 0x0043, "SH", "Tolerance Table Label" }, { 0x300a, 0x0044, "DS", "Gantry Angle Tolerance" }, { 0x300a, 0x0046, "DS", "Beam Limiting Device Angle Tolerance" }, { 0x300a, 0x0048, "SQ", "Beam Limiting Device Tolerance Sequence" }, { 0x300a, 0x004a, "DS", "Beam Limiting Device Position Tolerance" }, { 0x300a, 0x004c, "DS", "Patient Support Angle Tolerance" }, { 0x300a, 0x004e, "DS", "Table Top Eccentric Angle Tolerance" }, { 0x300a, 0x0051, "DS", "Table Top Vertical Position Tolerance" }, { 0x300a, 0x0052, "DS", "Table Top Longitudinal Position Tolerance" }, { 0x300a, 0x0053, "DS", "Table Top Lateral Position Tolerance" }, { 0x300a, 0x0055, "CS", "RT Plan Relationship" }, { 0x300a, 0x0070, "SQ", "Fraction Group Sequence" }, { 0x300a, 0x0071, "IS", "Fraction Group Number" }, { 0x300a, 0x0078, "IS", "Number of Fractions Planned" }, { 0x300a, 0x0079, "IS", "Number of Fractions Per Day" }, { 0x300a, 0x007a, "IS", "Repeat Fraction Cycle Length" }, { 0x300a, 0x007b, "LT", "Fraction Pattern" }, { 0x300a, 0x0080, "IS", "Number of Beams" }, { 0x300a, 0x0082, "DS", "Beam Dose Specification Point" }, { 0x300a, 0x0084, "DS", "Beam Dose" }, { 0x300a, 0x0086, "DS", "Beam Meterset" }, { 0x300a, 0x00a0, "IS", "Number of Brachy Application Setups" }, { 0x300a, 
0x00a2, "DS", "Brachy Application Setup Dose Specification Point" }, { 0x300a, 0x00a4, "DS", "Brachy Application Setup Dose" }, { 0x300a, 0x00b0, "SQ", "Beam Sequence" }, { 0x300a, 0x00b2, "SH", "Treatment Machine Name " }, { 0x300a, 0x00b3, "CS", "Primary Dosimeter Unit" }, { 0x300a, 0x00b4, "DS", "Source-Axis Distance" }, { 0x300a, 0x00b6, "SQ", "Beam Limiting Device Sequence" }, { 0x300a, 0x00b8, "CS", "RT Beam Limiting Device Type" }, { 0x300a, 0x00ba, "DS", "Source to Beam Limiting Device Distance" }, { 0x300a, 0x00bc, "IS", "Number of Leaf/Jaw Pairs" }, { 0x300a, 0x00be, "DS", "Leaf Position Boundaries" }, { 0x300a, 0x00c0, "IS", "Beam Number" }, { 0x300a, 0x00c2, "LO", "Beam Name" }, { 0x300a, 0x00c3, "ST", "Beam Description" }, { 0x300a, 0x00c4, "CS", "Beam Type" }, { 0x300a, 0x00c6, "CS", "Radiation Type" }, { 0x300a, 0x00c8, "IS", "Reference Image Number" }, { 0x300a, 0x00ca, "SQ", "Planned Verification Image Sequence" }, { 0x300a, 0x00cc, "LO", "Imaging Device Specific Acquisition Parameters" }, { 0x300a, 0x00ce, "CS", "Treatment Delivery Type" }, { 0x300a, 0x00d0, "IS", "Number of Wedges" }, { 0x300a, 0x00d1, "SQ", "Wedge Sequence" }, { 0x300a, 0x00d2, "IS", "Wedge Number" }, { 0x300a, 0x00d3, "CS", "Wedge Type" }, { 0x300a, 0x00d4, "SH", "Wedge ID" }, { 0x300a, 0x00d5, "IS", "Wedge Angle" }, { 0x300a, 0x00d6, "DS", "Wedge Factor" }, { 0x300a, 0x00d8, "DS", "Wedge Orientation" }, { 0x300a, 0x00da, "DS", "Source to Wedge Tray Distance" }, { 0x300a, 0x00e0, "IS", "Number of Compensators" }, { 0x300a, 0x00e1, "SH", "Material ID" }, { 0x300a, 0x00e2, "DS", "Total Compensator Tray Factor" }, { 0x300a, 0x00e3, "SQ", "Compensator Sequence" }, { 0x300a, 0x00e4, "IS", "Compensator Number" }, { 0x300a, 0x00e5, "SH", "Compensator ID" }, { 0x300a, 0x00e6, "DS", "Source to Compensator Tray Distance" }, { 0x300a, 0x00e7, "IS", "Compensator Rows" }, { 0x300a, 0x00e8, "IS", "Compensator Columns" }, { 0x300a, 0x00e9, "DS", "Compensator Pixel Spacing" }, { 0x300a, 
0x00ea, "DS", "Compensator Position" }, { 0x300a, 0x00eb, "DS", "Compensator Transmission Data" }, { 0x300a, 0x00ec, "DS", "Compensator Thickness Data" }, { 0x300a, 0x00ed, "IS", "Number of Boli" }, { 0x300a, 0x00f0, "IS", "Number of Blocks" }, { 0x300a, 0x00f2, "DS", "Total Block Tray Factor" }, { 0x300a, 0x00f4, "SQ", "Block Sequence" }, { 0x300a, 0x00f5, "SH", "Block Tray ID" }, { 0x300a, 0x00f6, "DS", "Source to Block Tray Distance" }, { 0x300a, 0x00f8, "CS", "Block Type" }, { 0x300a, 0x00fa, "CS", "Block Divergence" }, { 0x300a, 0x00fc, "IS", "Block Number" }, { 0x300a, 0x00fe, "LO", "Block Name" }, { 0x300a, 0x0100, "DS", "Block Thickness" }, { 0x300a, 0x0102, "DS", "Block Transmission" }, { 0x300a, 0x0104, "IS", "Block Number of Points" }, { 0x300a, 0x0106, "DS", "Block Data" }, { 0x300a, 0x0107, "SQ", "Applicator Sequence" }, { 0x300a, 0x0108, "SH", "Applicator ID" }, { 0x300a, 0x0109, "CS", "Applicator Type" }, { 0x300a, 0x010a, "LO", "Applicator Description" }, { 0x300a, 0x010c, "DS", "Cumulative Dose Reference Coefficient" }, { 0x300a, 0x010e, "DS", "Final Cumulative Meterset Weight" }, { 0x300a, 0x0110, "IS", "Number of Control Points" }, { 0x300a, 0x0111, "SQ", "Control Point Sequence" }, { 0x300a, 0x0112, "IS", "Control Point Index" }, { 0x300a, 0x0114, "DS", "Nominal Beam Energy" }, { 0x300a, 0x0115, "DS", "Dose Rate Set" }, { 0x300a, 0x0116, "SQ", "Wedge Position Sequence" }, { 0x300a, 0x0118, "CS", "Wedge Position" }, { 0x300a, 0x011a, "SQ", "Beam Limiting Device Position Sequence" }, { 0x300a, 0x011c, "DS", "Leaf Jaw Positions" }, { 0x300a, 0x011e, "DS", "Gantry Angle" }, { 0x300a, 0x011f, "CS", "Gantry Rotation Direction" }, { 0x300a, 0x0120, "DS", "Beam Limiting Device Angle" }, { 0x300a, 0x0121, "CS", "Beam Limiting Device Rotation Direction" }, { 0x300a, 0x0122, "DS", "Patient Support Angle" }, { 0x300a, 0x0123, "CS", "Patient Support Rotation Direction" }, { 0x300a, 0x0124, "DS", "Table Top Eccentric Axis Distance" }, { 0x300a, 0x0125, "DS", 
"Table Top Eccentric Angle" }, { 0x300a, 0x0126, "CS", "Table Top Eccentric Rotation Direction" }, { 0x300a, 0x0128, "DS", "Table Top Vertical Position" }, { 0x300a, 0x0129, "DS", "Table Top Longitudinal Position" }, { 0x300a, 0x012a, "DS", "Table Top Lateral Position" }, { 0x300a, 0x012c, "DS", "Isocenter Position" }, { 0x300a, 0x012e, "DS", "Surface Entry Point" }, { 0x300a, 0x0130, "DS", "Source to Surface Distance" }, { 0x300a, 0x0134, "DS", "Cumulative Meterset Weight" }, { 0x300a, 0x0180, "SQ", "Patient Setup Sequence" }, { 0x300a, 0x0182, "IS", "Patient Setup Number" }, { 0x300a, 0x0184, "LO", "Patient Additional Position" }, { 0x300a, 0x0190, "SQ", "Fixation Device Sequence" }, { 0x300a, 0x0192, "CS", "Fixation Device Type" }, { 0x300a, 0x0194, "SH", "Fixation Device Label" }, { 0x300a, 0x0196, "ST", "Fixation Device Description" }, { 0x300a, 0x0198, "SH", "Fixation Device Position" }, { 0x300a, 0x01a0, "SQ", "Shielding Device Sequence" }, { 0x300a, 0x01a2, "CS", "Shielding Device Type" }, { 0x300a, 0x01a4, "SH", "Shielding Device Label" }, { 0x300a, 0x01a6, "ST", "Shielding Device Description" }, { 0x300a, 0x01a8, "SH", "Shielding Device Position" }, { 0x300a, 0x01b0, "CS", "Setup Technique" }, { 0x300a, 0x01b2, "ST", "Setup TechniqueDescription" }, { 0x300a, 0x01b4, "SQ", "Setup Device Sequence" }, { 0x300a, 0x01b6, "CS", "Setup Device Type" }, { 0x300a, 0x01b8, "SH", "Setup Device Label" }, { 0x300a, 0x01ba, "ST", "Setup Device Description" }, { 0x300a, 0x01bc, "DS", "Setup Device Parameter" }, { 0x300a, 0x01d0, "ST", "Setup ReferenceDescription" }, { 0x300a, 0x01d2, "DS", "Table Top Vertical Setup Displacement" }, { 0x300a, 0x01d4, "DS", "Table Top Longitudinal Setup Displacement" }, { 0x300a, 0x01d6, "DS", "Table Top Lateral Setup Displacement" }, { 0x300a, 0x0200, "CS", "Brachy Treatment Technique" }, { 0x300a, 0x0202, "CS", "Brachy Treatment Type" }, { 0x300a, 0x0206, "SQ", "Treatment Machine Sequence" }, { 0x300a, 0x0210, "SQ", "Source Sequence" }, 
{ 0x300a, 0x0212, "IS", "Source Number" }, { 0x300a, 0x0214, "CS", "Source Type" }, { 0x300a, 0x0216, "LO", "Source Manufacturer" }, { 0x300a, 0x0218, "DS", "Active Source Diameter" }, { 0x300a, 0x021a, "DS", "Active Source Length" }, { 0x300a, 0x0222, "DS", "Source Encapsulation Nominal Thickness" }, { 0x300a, 0x0224, "DS", "Source Encapsulation Nominal Transmission" }, { 0x300a, 0x0226, "LO", "Source IsotopeName" }, { 0x300a, 0x0228, "DS", "Source Isotope Half Life" }, { 0x300a, 0x022a, "DS", "Reference Air Kerma Rate" }, { 0x300a, 0x022c, "DA", "Air Kerma Rate Reference Date" }, { 0x300a, 0x022e, "TM", "Air Kerma Rate Reference Time" }, { 0x300a, 0x0230, "SQ", "Application Setup Sequence" }, { 0x300a, 0x0232, "CS", "Application Setup Type" }, { 0x300a, 0x0234, "IS", "Application Setup Number" }, { 0x300a, 0x0236, "LO", "Application Setup Name" }, { 0x300a, 0x0238, "LO", "Application Setup Manufacturer" }, { 0x300a, 0x0240, "IS", "Template Number" }, { 0x300a, 0x0242, "SH", "Template Type" }, { 0x300a, 0x0244, "LO", "Template Name" }, { 0x300a, 0x0250, "DS", "Total Reference Air Kerma" }, { 0x300a, 0x0260, "SQ", "Brachy Accessory Device Sequence" }, { 0x300a, 0x0262, "IS", "Brachy Accessory Device Number" }, { 0x300a, 0x0263, "SH", "Brachy Accessory Device ID" }, { 0x300a, 0x0264, "CS", "Brachy Accessory Device Type" }, { 0x300a, 0x0266, "LO", "Brachy Accessory Device Name" }, { 0x300a, 0x026a, "DS", "Brachy Accessory Device Nominal Thickness" }, { 0x300a, 0x026c, "DS", "Brachy Accessory Device Nominal Transmission" }, { 0x300a, 0x0280, "SQ", "Channel Sequence" }, { 0x300a, 0x0282, "IS", "Channel Number" }, { 0x300a, 0x0284, "DS", "Channel Length" }, { 0x300a, 0x0286, "DS", "Channel Total Time" }, { 0x300a, 0x0288, "CS", "Source Movement Type" }, { 0x300a, 0x028a, "IS", "Number of Pulses" }, { 0x300a, 0x028c, "DS", "Pulse Repetition Interval" }, { 0x300a, 0x0290, "IS", "Source Applicator Number" }, { 0x300a, 0x0291, "SH", "Source Applicator ID" }, { 0x300a, 
0x0292, "CS", "Source Applicator Type" }, { 0x300a, 0x0294, "LO", "Source Applicator Name" }, { 0x300a, 0x0296, "DS", "Source Applicator Length" }, { 0x300a, 0x0298, "LO", "Source Applicator Manufacturer" }, { 0x300a, 0x029c, "DS", "Source Applicator Wall Nominal Thickness" }, { 0x300a, 0x029e, "DS", "Source Applicator Wall Nominal Transmission" }, { 0x300a, 0x02a0, "DS", "Source Applicator Step Size" }, { 0x300a, 0x02a2, "IS", "Transfer Tube Number" }, { 0x300a, 0x02a4, "DS", "Transfer Tube Length" }, { 0x300a, 0x02b0, "SQ", "Channel Shield Sequence" }, { 0x300a, 0x02b2, "IS", "Channel Shield Number" }, { 0x300a, 0x02b3, "SH", "Channel Shield ID" }, { 0x300a, 0x02b4, "LO", "Channel Shield Name" }, { 0x300a, 0x02b8, "DS", "Channel Shield Nominal Thickness" }, { 0x300a, 0x02ba, "DS", "Channel Shield Nominal Transmission" }, { 0x300a, 0x02c8, "DS", "Final Cumulative Time Weight" }, { 0x300a, 0x02d0, "SQ", "Brachy Control Point Sequence" }, { 0x300a, 0x02d2, "DS", "Control Point Relative Position" }, { 0x300a, 0x02d4, "DS", "Control Point 3D Position" }, { 0x300a, 0x02d6, "DS", "Cumulative Time Weight" }, { 0x300c, 0x0002, "SQ", "Referenced RT Plan Sequence" }, { 0x300c, 0x0004, "SQ", "Referenced Beam Sequence" }, { 0x300c, 0x0006, "IS", "Referenced Beam Number" }, { 0x300c, 0x0007, "IS", "Referenced Reference Image Number" }, { 0x300c, 0x0008, "DS", "Start Cumulative Meterset Weight" }, { 0x300c, 0x0009, "DS", "End Cumulative Meterset Weight" }, { 0x300c, 0x000a, "SQ", "Referenced Brachy Application Setup Sequence" }, { 0x300c, 0x000c, "IS", "Referenced Brachy Application Setup Number" }, { 0x300c, 0x000e, "IS", "Referenced Source Number" }, { 0x300c, 0x0020, "SQ", "Referenced Fraction Group Sequence" }, { 0x300c, 0x0022, "IS", "Referenced Fraction Group Number" }, { 0x300c, 0x0040, "SQ", "Referenced Verification Image Sequence" }, { 0x300c, 0x0042, "SQ", "Referenced Reference Image Sequence" }, { 0x300c, 0x0050, "SQ", "Referenced Dose Reference Sequence" }, { 
0x300c, 0x0051, "IS", "Referenced Dose Reference Number" }, { 0x300c, 0x0055, "SQ", "Brachy Referenced Dose Reference Sequence" }, { 0x300c, 0x0060, "SQ", "Referenced Structure Set Sequence" }, { 0x300c, 0x006a, "IS", "Referenced Patient Setup Number" }, { 0x300c, 0x0080, "SQ", "Referenced Dose Sequence" }, { 0x300c, 0x00a0, "IS", "Referenced Tolerance Table Number" }, { 0x300c, 0x00b0, "SQ", "Referenced Bolus Sequence" }, { 0x300c, 0x00c0, "IS", "Referenced Wedge Number" }, { 0x300c, 0x00d0, "IS", "Referenced Compensato rNumber" }, { 0x300c, 0x00e0, "IS", "Referenced Block Number" }, { 0x300c, 0x00f0, "IS", "Referenced Control Point" }, { 0x300e, 0x0002, "CS", "Approval Status" }, { 0x300e, 0x0004, "DA", "Review Date" }, { 0x300e, 0x0005, "TM", "Review Time" }, { 0x300e, 0x0008, "PN", "Reviewer Name" }, { 0x4000, 0x0000, "UL", "Text Group Length" }, { 0x4000, 0x0010, "LT", "Text Arbitrary" }, { 0x4000, 0x4000, "LT", "Text Comments" }, { 0x4008, 0x0000, "UL", "Results Group Length" }, { 0x4008, 0x0040, "SH", "Results ID" }, { 0x4008, 0x0042, "LO", "Results ID Issuer" }, { 0x4008, 0x0050, "SQ", "Referenced Interpretation Sequence" }, { 0x4008, 0x00ff, "CS", "Report Production Status" }, { 0x4008, 0x0100, "DA", "Interpretation Recorded Date" }, { 0x4008, 0x0101, "TM", "Interpretation Recorded Time" }, { 0x4008, 0x0102, "PN", "Interpretation Recorder" }, { 0x4008, 0x0103, "LO", "Reference to Recorded Sound" }, { 0x4008, 0x0108, "DA", "Interpretation Transcription Date" }, { 0x4008, 0x0109, "TM", "Interpretation Transcription Time" }, { 0x4008, 0x010a, "PN", "Interpretation Transcriber" }, { 0x4008, 0x010b, "ST", "Interpretation Text" }, { 0x4008, 0x010c, "PN", "Interpretation Author" }, { 0x4008, 0x0111, "SQ", "Interpretation Approver Sequence" }, { 0x4008, 0x0112, "DA", "Interpretation Approval Date" }, { 0x4008, 0x0113, "TM", "Interpretation Approval Time" }, { 0x4008, 0x0114, "PN", "Physician Approving Interpretation" }, { 0x4008, 0x0115, "LT", "Interpretation 
Diagnosis Description" }, { 0x4008, 0x0117, "SQ", "InterpretationDiagnosis Code Sequence" }, { 0x4008, 0x0118, "SQ", "Results Distribution List Sequence" }, { 0x4008, 0x0119, "PN", "Distribution Name" }, { 0x4008, 0x011a, "LO", "Distribution Address" }, { 0x4008, 0x0200, "SH", "Interpretation ID" }, { 0x4008, 0x0202, "LO", "Interpretation ID Issuer" }, { 0x4008, 0x0210, "CS", "Interpretation Type ID" }, { 0x4008, 0x0212, "CS", "Interpretation Status ID" }, { 0x4008, 0x0300, "ST", "Impressions" }, { 0x4008, 0x4000, "ST", "Results Comments" }, { 0x4009, 0x0001, "LT", "Report ID" }, { 0x4009, 0x0020, "LT", "Report Status" }, { 0x4009, 0x0030, "DA", "Report Creation Date" }, { 0x4009, 0x0070, "LT", "Report Approving Physician" }, { 0x4009, 0x00e0, "LT", "Report Text" }, { 0x4009, 0x00e1, "LT", "Report Author" }, { 0x4009, 0x00e3, "LT", "Reporting Radiologist" }, { 0x5000, 0x0000, "UL", "Curve Group Length" }, { 0x5000, 0x0005, "US", "Curve Dimensions" }, { 0x5000, 0x0010, "US", "Number of Points" }, { 0x5000, 0x0020, "CS", "Type of Data" }, { 0x5000, 0x0022, "LO", "Curve Description" }, { 0x5000, 0x0030, "SH", "Axis Units" }, { 0x5000, 0x0040, "SH", "Axis Labels" }, { 0x5000, 0x0103, "US", "Data Value Representation" }, { 0x5000, 0x0104, "US", "Minimum Coordinate Value" }, { 0x5000, 0x0105, "US", "Maximum Coordinate Value" }, { 0x5000, 0x0106, "SH", "Curve Range" }, { 0x5000, 0x0110, "US", "Curve Data Descriptor" }, { 0x5000, 0x0112, "US", "Coordinate Start Value" }, { 0x5000, 0x0114, "US", "Coordinate Step Value" }, { 0x5000, 0x1001, "CS", "Curve Activation Layer" }, { 0x5000, 0x2000, "US", "Audio Type" }, { 0x5000, 0x2002, "US", "Audio Sample Format" }, { 0x5000, 0x2004, "US", "Number of Channels" }, { 0x5000, 0x2006, "UL", "Number of Samples" }, { 0x5000, 0x2008, "UL", "Sample Rate" }, { 0x5000, 0x200a, "UL", "Total Time" }, { 0x5000, 0x200c, "xs", "Audio Sample Data" }, { 0x5000, 0x200e, "LT", "Audio Comments" }, { 0x5000, 0x2500, "LO", "Curve Label" }, { 0x5000, 
0x2600, "SQ", "CurveReferenced Overlay Sequence" }, { 0x5000, 0x2610, "US", "CurveReferenced Overlay Group" }, { 0x5000, 0x3000, "OW", "Curve Data" }, { 0x6000, 0x0000, "UL", "Overlay Group Length" }, { 0x6000, 0x0001, "US", "Gray Palette Color Lookup Table Descriptor" }, { 0x6000, 0x0002, "US", "Gray Palette Color Lookup Table Data" }, { 0x6000, 0x0010, "US", "Overlay Rows" }, { 0x6000, 0x0011, "US", "Overlay Columns" }, { 0x6000, 0x0012, "US", "Overlay Planes" }, { 0x6000, 0x0015, "IS", "Number of Frames in Overlay" }, { 0x6000, 0x0022, "LO", "Overlay Description" }, { 0x6000, 0x0040, "CS", "Overlay Type" }, { 0x6000, 0x0045, "CS", "Overlay Subtype" }, { 0x6000, 0x0050, "SS", "Overlay Origin" }, { 0x6000, 0x0051, "US", "Image Frame Origin" }, { 0x6000, 0x0052, "US", "Plane Origin" }, { 0x6000, 0x0060, "LO", "Overlay Compression Code" }, { 0x6000, 0x0061, "SH", "Overlay Compression Originator" }, { 0x6000, 0x0062, "SH", "Overlay Compression Label" }, { 0x6000, 0x0063, "SH", "Overlay Compression Description" }, { 0x6000, 0x0066, "AT", "Overlay Compression Step Pointers" }, { 0x6000, 0x0068, "US", "Overlay Repeat Interval" }, { 0x6000, 0x0069, "US", "Overlay Bits Grouped" }, { 0x6000, 0x0100, "US", "Overlay Bits Allocated" }, { 0x6000, 0x0102, "US", "Overlay Bit Position" }, { 0x6000, 0x0110, "LO", "Overlay Format" }, { 0x6000, 0x0200, "xs", "Overlay Location" }, { 0x6000, 0x0800, "LO", "Overlay Code Label" }, { 0x6000, 0x0802, "US", "Overlay Number of Tables" }, { 0x6000, 0x0803, "AT", "Overlay Code Table Location" }, { 0x6000, 0x0804, "US", "Overlay Bits For Code Word" }, { 0x6000, 0x1001, "CS", "Overlay Activation Layer" }, { 0x6000, 0x1100, "US", "Overlay Descriptor - Gray" }, { 0x6000, 0x1101, "US", "Overlay Descriptor - Red" }, { 0x6000, 0x1102, "US", "Overlay Descriptor - Green" }, { 0x6000, 0x1103, "US", "Overlay Descriptor - Blue" }, { 0x6000, 0x1200, "US", "Overlays - Gray" }, { 0x6000, 0x1201, "US", "Overlays - Red" }, { 0x6000, 0x1202, "US", "Overlays - 
Green" }, { 0x6000, 0x1203, "US", "Overlays - Blue" }, { 0x6000, 0x1301, "IS", "ROI Area" }, { 0x6000, 0x1302, "DS", "ROI Mean" }, { 0x6000, 0x1303, "DS", "ROI Standard Deviation" }, { 0x6000, 0x1500, "LO", "Overlay Label" }, { 0x6000, 0x3000, "OW", "Overlay Data" }, { 0x6000, 0x4000, "LT", "Overlay Comments" }, { 0x6001, 0x0000, "UN", "?" }, { 0x6001, 0x0010, "LO", "?" }, { 0x6001, 0x1010, "xs", "?" }, { 0x6001, 0x1030, "xs", "?" }, { 0x6021, 0x0000, "xs", "?" }, { 0x6021, 0x0010, "xs", "?" }, { 0x7001, 0x0010, "LT", "Dummy" }, { 0x7003, 0x0010, "LT", "Info" }, { 0x7005, 0x0010, "LT", "Dummy" }, { 0x7000, 0x0004, "ST", "TextAnnotation" }, { 0x7000, 0x0005, "IS", "Box" }, { 0x7000, 0x0007, "IS", "ArrowEnd" }, { 0x7001, 0x0001, "SL", "Private Group Length To End" }, { 0x7001, 0x0002, "OB", "Unknown" }, { 0x7001, 0x0011, "SL", "Private Creator" }, { 0x7001, 0x0021, "SL", "Private Creator" }, { 0x7001, 0x0022, "SQ", "Private Creator" }, { 0x7001, 0x0041, "SL", "Private Creator" }, { 0x7001, 0x0042, "SL", "Private Creator" }, { 0x7001, 0x0051, "SL", "Private Creator" }, { 0x7001, 0x0052, "SL", "Private Creator" }, { 0x7001, 0x0075, "SL", "Private Creator" }, { 0x7001, 0x0076, "SL", "Private Creator" }, { 0x7001, 0x0077, "OB", "Private Creator" }, { 0x7001, 0x0101, "SL", "Unknown" }, { 0x7001, 0x0121, "SL", "Unknown" }, { 0x7001, 0x0122, "SQ", "Unknown" }, { 0x7fe0, 0x0000, "UL", "Pixel Data Group Length" }, { 0x7fe0, 0x0010, "xs", "Pixel Data" }, { 0x7fe0, 0x0020, "OW", "Coefficients SDVN" }, { 0x7fe0, 0x0030, "OW", "Coefficients SDHN" }, { 0x7fe0, 0x0040, "OW", "Coefficients SDDN" }, { 0x7fe1, 0x0010, "xs", "Pixel Data" }, { 0x7f00, 0x0000, "UL", "Variable Pixel Data Group Length" }, { 0x7f00, 0x0010, "xs", "Variable Pixel Data" }, { 0x7f00, 0x0011, "US", "Variable Next Data Group" }, { 0x7f00, 0x0020, "OW", "Variable Coefficients SDVN" }, { 0x7f00, 0x0030, "OW", "Variable Coefficients SDHN" }, { 0x7f00, 0x0040, "OW", "Variable Coefficients SDDN" }, { 0x7fe1, 0x0000, 
"OB", "Binary Data" }, { 0x7fe3, 0x0000, "LT", "Image Graphics Format Code" }, { 0x7fe3, 0x0010, "OB", "Image Graphics" }, { 0x7fe3, 0x0020, "OB", "Image Graphics Dummy" }, { 0x7ff1, 0x0001, "US", "?" }, { 0x7ff1, 0x0002, "US", "?" }, { 0x7ff1, 0x0003, "xs", "?" }, { 0x7ff1, 0x0004, "IS", "?" }, { 0x7ff1, 0x0005, "US", "?" }, { 0x7ff1, 0x0007, "US", "?" }, { 0x7ff1, 0x0008, "US", "?" }, { 0x7ff1, 0x0009, "US", "?" }, { 0x7ff1, 0x000a, "LT", "?" }, { 0x7ff1, 0x000b, "US", "?" }, { 0x7ff1, 0x000c, "US", "?" }, { 0x7ff1, 0x000d, "US", "?" }, { 0x7ff1, 0x0010, "US", "?" }, { 0xfffc, 0xfffc, "OB", "Data Set Trailing Padding" }, { 0xfffe, 0xe000, "!!", "Item" }, { 0xfffe, 0xe00d, "!!", "Item Delimitation Item" }, { 0xfffe, 0xe0dd, "!!", "Sequence Delimitation Item" }, { 0xffff, 0xffff, "xs", (char *) NULL } }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D C M % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDCM() returns MagickTrue if the image format type, identified by the % magick string, is DCM. % % The format of the IsDCM method is: % % MagickBooleanType IsDCM(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDCM(const unsigned char *magick,const size_t length) { if (length < 132) return(MagickFalse); if (LocaleNCompare((char *) (magick+128),"DICM",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D C M I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDCMImage() reads a Digital Imaging and Communications in Medicine % (DICOM) file and returns it. 
It allocates the memory necessary for the % new Image structure and returns a pointer to the new image. % % The format of the ReadDCMImage method is: % % Image *ReadDCMImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ typedef struct _DCMInfo { MagickBooleanType polarity; Quantum *scale; size_t bits_allocated, bytes_per_pixel, depth, mask, max_value, samples_per_pixel, signed_data, significant_bits; MagickBooleanType rescale; double rescale_intercept, rescale_slope, window_center, window_width; } DCMInfo; typedef struct _DCMStreamInfo { size_t remaining, segment_count; ssize_t segments[15]; size_t offset_count; ssize_t *offsets; ssize_t count; int byte; } DCMStreamInfo; static int ReadDCMByte(DCMStreamInfo *stream_info,Image *image) { if (image->compression != RLECompression) return(ReadBlobByte(image)); if (stream_info->count == 0) { int byte; ssize_t count; if (stream_info->remaining <= 2) stream_info->remaining=0; else stream_info->remaining-=2; count=(ssize_t) ReadBlobByte(image); byte=ReadBlobByte(image); if (count == 128) return(0); else if (count < 128) { /* Literal bytes. */ stream_info->count=count; stream_info->byte=(-1); return(byte); } else { /* Repeated bytes. */ stream_info->count=256-count; stream_info->byte=byte; return(byte); } } stream_info->count--; if (stream_info->byte >= 0) return(stream_info->byte); if (stream_info->remaining > 0) stream_info->remaining--; return(ReadBlobByte(image)); } static unsigned short ReadDCMShort(DCMStreamInfo *stream_info,Image *image) { int shift, byte; unsigned short value; if (image->compression != RLECompression) return(ReadBlobLSBShort(image)); shift=image->depth < 16 ? 
4 : 8;
  value=(unsigned short) ReadDCMByte(stream_info,image);
  byte=ReadDCMByte(stream_info,image);
  if (byte < 0)
    return(0);   /* EOF while reading the high byte */
  value|=(unsigned short) (byte << shift);
  return(value);
}

/*
  Reinterpret a 16-bit sample as signed without invoking implementation-
  defined conversions: type-pun through a union.
*/
static signed short ReadDCMSignedShort(DCMStreamInfo *stream_info,Image *image)
{
  union
  {
    unsigned short
      unsigned_value;

    signed short
      signed_value;
  } quantum;

  quantum.unsigned_value=ReadDCMShort(stream_info,image);
  return(quantum.signed_value);
}

/*
  Decode one pass of pixel data into the image.  When first_segment is
  MagickFalse this is the second RLE segment and each sample is OR-ed as
  the high byte on top of the value stored by the first pass.
*/
static MagickBooleanType ReadDCMPixels(Image *image,DCMInfo *info,
  DCMStreamInfo *stream_info,MagickBooleanType first_segment,
  ExceptionInfo *exception)
{
  int
    byte,
    index;

  MagickBooleanType
    status;

  PixelPacket
    pixel;

  register ssize_t
    i,
    x;

  register Quantum
    *q;

  ssize_t
    y;

  /*
    Convert DCM Medical image to pixel packets.
  */
  byte=0;
  i=0;
  status=MagickTrue;
  (void) memset(&pixel,0,sizeof(pixel));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (info->samples_per_pixel == 1)
        {
          int
            pixel_value;

          /* Single-sample (gray or palette-indexed) data. */
          if (info->bytes_per_pixel == 1)
            pixel_value=info->polarity != MagickFalse ?
((int) info->max_value-ReadDCMByte(stream_info,image)) :
              ReadDCMByte(stream_info,image);
          else
            if ((info->bits_allocated != 12) || (info->significant_bits != 12))
              {
                if (info->signed_data)
                  pixel_value=ReadDCMSignedShort(stream_info,image);
                else
                  pixel_value=(int) ReadDCMShort(stream_info,image);
                if (info->polarity != MagickFalse)
                  pixel_value=(int)info->max_value-pixel_value;
              }
            else
              {
                /*
                  12-bit packed data: two 12-bit samples span three bytes,
                  so alternate between unpacking and combining with the
                  nibble carried over in 'byte'.
                */
                if ((i & 0x01) != 0)
                  pixel_value=(ReadDCMByte(stream_info,image) << 8) |
                    byte;
                else
                  {
                    pixel_value=ReadDCMSignedShort(stream_info,image);
                    byte=(int) (pixel_value & 0x0f);
                    pixel_value>>=4;
                  }
                i++;
              }
          if (info->signed_data == 1)
            pixel_value-=32767;  /* NOTE(review): bias looks like it should
                                    be 32768 for two's complement -- confirm */
          index=pixel_value;
          if (info->rescale != MagickFalse)
            {
              double
                scaled_value;

              /* Apply DICOM rescale slope/intercept, then window/level. */
              scaled_value=pixel_value*info->rescale_slope+
                info->rescale_intercept;
              index=(int) scaled_value;
              if (info->window_width != 0)
                {
                  double
                    window_max,
                    window_min;

                  window_min=ceil(info->window_center-
                    (info->window_width-1.0)/2.0-0.5);
                  window_max=floor(info->window_center+
                    (info->window_width-1.0)/2.0+0.5);
                  if (scaled_value <= window_min)
                    index=0;
                  else
                    if (scaled_value > window_max)
                      index=(int) info->max_value;
                    else
                      index=(int) (info->max_value*(((scaled_value-
                        info->window_center-0.5)/(info->window_width-1))+0.5));
                }
            }
          index&=info->mask;
          index=(int) ConstrainColormapIndex(image,(ssize_t) index,exception);
          if (first_segment)
            SetPixelIndex(image,(Quantum) index,q);
          else
            SetPixelIndex(image,(Quantum) (((size_t) index) |
              (((size_t) GetPixelIndex(image,q)) << 8)),q);
          pixel.red=(unsigned int) image->colormap[index].red;
          pixel.green=(unsigned int) image->colormap[index].green;
          pixel.blue=(unsigned int) image->colormap[index].blue;
        }
      else
        {
          /* Multi-sample (direct RGB) data. */
          if (info->bytes_per_pixel == 1)
            {
              pixel.red=(unsigned int) ReadDCMByte(stream_info,image);
              pixel.green=(unsigned int) ReadDCMByte(stream_info,image);
              pixel.blue=(unsigned int) ReadDCMByte(stream_info,image);
            }
          else
            {
              pixel.red=ReadDCMShort(stream_info,image);
              pixel.green=ReadDCMShort(stream_info,image);
              pixel.blue=ReadDCMShort(stream_info,image);
            }
pixel.red&=info->mask;
          pixel.green&=info->mask;
          pixel.blue&=info->mask;
          if (info->scale != (Quantum *) NULL)
            {
              /* Scale stored depth up to the quantum depth via the LUT. */
              if ((MagickSizeType) pixel.red <= GetQuantumRange(info->depth))
                pixel.red=info->scale[pixel.red];
              if ((MagickSizeType) pixel.green <= GetQuantumRange(info->depth))
                pixel.green=info->scale[pixel.green];
              if ((MagickSizeType) pixel.blue <= GetQuantumRange(info->depth))
                pixel.blue=info->scale[pixel.blue];
            }
        }
      if (first_segment != MagickFalse)
        {
          SetPixelRed(image,(Quantum) pixel.red,q);
          SetPixelGreen(image,(Quantum) pixel.green,q);
          SetPixelBlue(image,(Quantum) pixel.blue,q);
        }
      else
        {
          /* Second RLE segment supplies the high byte of each sample. */
          SetPixelRed(image,(Quantum) (((size_t) pixel.red) |
            (((size_t) GetPixelRed(image,q)) << 8)),q);
          SetPixelGreen(image,(Quantum) (((size_t) pixel.green) |
            (((size_t) GetPixelGreen(image,q)) << 8)),q);
          SetPixelBlue(image,(Quantum) (((size_t) pixel.blue) |
            (((size_t) GetPixelBlue(image,q)) << 8)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
    if (image->previous == (Image *) NULL)
      {
        status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
          image->rows);
        if (status == MagickFalse)
          break;
      }
  }
  return(status);
}

static Image *ReadDCMImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
/*
  Release every heap resource owned by this reader before throwing; the
  macro relies on the local variables declared below all being in scope.
*/
#define ThrowDCMException(exception,message) \
{ \
  if (info.scale != (Quantum *) NULL) \
    info.scale=(Quantum *) RelinquishMagickMemory(info.scale); \
  if (data != (unsigned char *) NULL) \
    data=(unsigned char *) RelinquishMagickMemory(data); \
  if (graymap != (int *) NULL) \
    graymap=(int *) RelinquishMagickMemory(graymap); \
  if (bluemap != (int *) NULL) \
    bluemap=(int *) RelinquishMagickMemory(bluemap); \
  if (greenmap != (int *) NULL) \
    greenmap=(int *) RelinquishMagickMemory(greenmap); \
  if (redmap != (int *) NULL) \
    redmap=(int *) RelinquishMagickMemory(redmap); \
  if (stream_info->offsets != (ssize_t *) NULL) \
    stream_info->offsets=(ssize_t *) RelinquishMagickMemory( \
      stream_info->offsets); \
  if (stream_info != (DCMStreamInfo *) NULL) \
    stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info); \
  ThrowReaderException((exception),(message)); \
}

  char
    explicit_vr[MagickPathExtent],
    implicit_vr[MagickPathExtent],
    magick[MagickPathExtent],
    photometric[MagickPathExtent];

  DCMInfo
    info;

  DCMStreamInfo
    *stream_info;

  Image
    *image;

  int
    *bluemap,
    datum,
    *greenmap,
    *graymap,
    *redmap;

  MagickBooleanType
    explicit_file,
    explicit_retry,
    use_explicit;

  MagickOffsetType
    offset;

  register unsigned char
    *p;

  register ssize_t
    i;

  /* NOTE(review): 'status' is declared size_t yet assigned/compared as a
     boolean (MagickFalse) throughout -- works but worth confirming. */
  size_t
    colors,
    height,
    length,
    number_scenes,
    quantum,
    status,
    width;

  ssize_t
    count,
    scene;

  unsigned char
    *data;

  unsigned short
    group,
    element;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  image->depth=8UL;
  image->endian=LSBEndian;
  /*
    Read DCM preamble.
  */
  (void) memset(&info,0,sizeof(info));
  data=(unsigned char *) NULL;
  graymap=(int *) NULL;
  redmap=(int *) NULL;
  greenmap=(int *) NULL;
  bluemap=(int *) NULL;
  stream_info=(DCMStreamInfo *) AcquireMagickMemory(sizeof(*stream_info));
  if (stream_info == (DCMStreamInfo *) NULL)
    ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(stream_info,0,sizeof(*stream_info));
  /* Skip the 128-byte preamble and check for the "DICM" signature;
     files without it are rewound and parsed from offset 0. */
  count=ReadBlob(image,128,(unsigned char *) magick);
  if (count != 128)
    ThrowDCMException(CorruptImageError,"ImproperImageHeader");
  count=ReadBlob(image,4,(unsigned char *) magick);
  if ((count != 4) || (LocaleNCompare(magick,"DICM",4) != 0))
    {
      offset=SeekBlob(image,0L,SEEK_SET);
      if (offset < 0)
        ThrowDCMException(CorruptImageError,"ImproperImageHeader");
    }
  /*
    Read DCM Medical image.
*/
  (void) CopyMagickString(photometric,"MONOCHROME1 ",MagickPathExtent);
  /* Reader defaults before any (0028,xxxx) elements are seen. */
  info.bits_allocated=8;
  info.bytes_per_pixel=1;
  info.depth=8;
  info.mask=0xffff;
  info.max_value=255UL;
  info.samples_per_pixel=1;
  /* NOTE(review): sentinel for "not yet set"; later code compares against
     0xffff, which differs from ~0UL on LP64 -- confirm. */
  info.signed_data=(~0UL);
  info.rescale_slope=1.0;
  data=(unsigned char *) NULL;
  element=0;
  explicit_vr[2]='\0';
  explicit_file=MagickFalse;
  colors=0;
  redmap=(int *) NULL;
  greenmap=(int *) NULL;
  bluemap=(int *) NULL;
  graymap=(int *) NULL;
  height=0;
  number_scenes=1;
  use_explicit=MagickFalse;
  explicit_retry = MagickFalse;
  width=0;
  while (TellBlob(image) < (MagickOffsetType) GetBlobSize(image))
  {
    /* Walk data elements until the pixel-data tag (7FE0,0010). */
    for (group=0; (group != 0x7FE0) || (element != 0x0010) ; )
    {
      /*
        Read a group.
      */
      image->offset=(ssize_t) TellBlob(image);
      group=ReadBlobLSBShort(image);
      element=ReadBlobLSBShort(image);
      if ((group == 0xfffc) && (element == 0xfffc))
        break;
      /* Group 0x0002 (file meta) is always little-endian; others follow
         the negotiated transfer syntax. */
      if ((group != 0x0002) && (image->endian == MSBEndian))
        {
          group=(unsigned short) ((group << 8) | ((group >> 8) & 0xFF));
          element=(unsigned short) ((element << 8) | ((element >> 8) & 0xFF));
        }
      quantum=0;
      /*
        Find corresponding VR for this group and element.
      */
      for (i=0; dicom_info[i].group < 0xffff; i++)
        if ((group == dicom_info[i].group) &&
            (element == dicom_info[i].element))
          break;
      (void) CopyMagickString(implicit_vr,dicom_info[i].vr,MagickPathExtent);
      count=ReadBlob(image,2,(unsigned char *) explicit_vr);
      if (count != 2)
        ThrowDCMException(CorruptImageError,"ImproperImageHeader");
      /*
        Check for "explicitness", but meta-file headers always explicit.
      */
      if ((explicit_file == MagickFalse) && (group != 0x0002))
        explicit_file=(isupper((unsigned char) *explicit_vr) != MagickFalse) &&
          (isupper((unsigned char) *(explicit_vr+1)) != MagickFalse) ?
          MagickTrue : MagickFalse;
      use_explicit=((group == 0x0002) && (explicit_retry == MagickFalse)) ||
        (explicit_file != MagickFalse) ? MagickTrue : MagickFalse;
      if ((use_explicit != MagickFalse) && (strncmp(implicit_vr,"xs",2) == 0))
        (void) CopyMagickString(implicit_vr,explicit_vr,MagickPathExtent);
      if ((use_explicit == MagickFalse) || (strncmp(implicit_vr,"!!",2) == 0))
        {
          /* Implicit VR: the two bytes read were part of the length. */
          offset=SeekBlob(image,(MagickOffsetType) -2,SEEK_CUR);
          if (offset < 0)
            ThrowDCMException(CorruptImageError,"ImproperImageHeader");
          quantum=4;
        }
      else
        {
          /*
            Assume explicit type.
          */
          quantum=2;
          /* These VRs carry a 2-byte reserved field + 4-byte length. */
          if ((strncmp(explicit_vr,"OB",2) == 0) ||
              (strncmp(explicit_vr,"UN",2) == 0) ||
              (strncmp(explicit_vr,"OW",2) == 0) ||
              (strncmp(explicit_vr,"SQ",2) == 0))
            {
              (void) ReadBlobLSBShort(image);
              quantum=4;
            }
        }
      datum=0;
      if (quantum == 4)
        {
          if (group == 0x0002)
            datum=ReadBlobLSBSignedLong(image);
          else
            datum=ReadBlobSignedLong(image);
        }
      else
        if (quantum == 2)
          {
            if (group == 0x0002)
              datum=ReadBlobLSBSignedShort(image);
            else
              datum=ReadBlobSignedShort(image);
          }
      quantum=0;
      length=1;
      if (datum != 0)
        {
          /* Translate the VR into an element size, then convert the byte
             length into an element count. */
          if ((strncmp(implicit_vr,"OW",2) == 0) ||
              (strncmp(implicit_vr,"SS",2) == 0) ||
              (strncmp(implicit_vr,"US",2) == 0))
            quantum=2;
          else
            if ((strncmp(implicit_vr,"FL",2) == 0) ||
                (strncmp(implicit_vr,"OF",2) == 0) ||
                (strncmp(implicit_vr,"SL",2) == 0) ||
                (strncmp(implicit_vr,"UL",2) == 0))
              quantum=4;
            else
              if (strncmp(implicit_vr,"FD",2) == 0)
                quantum=8;
              else
                quantum=1;
          if (datum != ~0)
            length=(size_t) datum/quantum;
          else
            {
              /*
                Sequence and item of undefined length.
              */
              quantum=0;
              length=0;
            }
        }
      if (image_info->verbose != MagickFalse)
        {
          /*
            Display Dicom info.
*/
          if (use_explicit == MagickFalse)
            explicit_vr[0]='\0';
          for (i=0; dicom_info[i].description != (char *) NULL; i++)
            if ((group == dicom_info[i].group) &&
                (element == dicom_info[i].element))
              break;
          (void) FormatLocaleFile(stdout,"0x%04lX %4ld %s-%s (0x%04lx,0x%04lx)",
            (unsigned long) image->offset,(long) length,implicit_vr,explicit_vr,
            (unsigned long) group,(unsigned long) element);
          if (dicom_info[i].description != (char *) NULL)
            (void) FormatLocaleFile(stdout," %s",dicom_info[i].description);
          (void) FormatLocaleFile(stdout,": ");
        }
      if ((group == 0x7FE0) && (element == 0x0010))
        {
          /* Pixel data reached; leave the element loop. */
          if (image_info->verbose != MagickFalse)
            (void) FormatLocaleFile(stdout,"\n");
          break;
        }
      /*
        Allocate space and read an array.
      */
      data=(unsigned char *) NULL;
      if ((length == 1) && (quantum == 1))
        datum=ReadBlobByte(image);
      else
        if ((length == 1) && (quantum == 2))
          {
            if (group == 0x0002)
              datum=ReadBlobLSBSignedShort(image);
            else
              datum=ReadBlobSignedShort(image);
          }
        else
          if ((length == 1) && (quantum == 4))
            {
              if (group == 0x0002)
                datum=ReadBlobLSBSignedLong(image);
              else
                datum=ReadBlobSignedLong(image);
            }
          else
            if ((quantum != 0) && (length != 0))
              {
                if (length > (size_t) GetBlobSize(image))
                  ThrowDCMException(CorruptImageError,
                    "InsufficientImageDataInFile");
                /* ~length >= 1 guards the length+1 overflow. */
                if (~length >= 1)
                  data=(unsigned char *) AcquireQuantumMemory(length+1,quantum*
                    sizeof(*data));
                if (data == (unsigned char *) NULL)
                  ThrowDCMException(ResourceLimitError,
                    "MemoryAllocationFailed");
                count=ReadBlob(image,(size_t) quantum*length,data);
                if (count != (ssize_t) (quantum*length))
                  {
                    if (image_info->verbose != MagickFalse)
                      (void) FormatLocaleFile(stdout,"count=%d quantum=%d "
                        "length=%d group=%d\n",(int) count,(int) quantum,(int)
                        length,(int) group);
                    ThrowDCMException(CorruptImageError,
                      "InsufficientImageDataInFile");
                  }
                data[length*quantum]='\0';
              }
      /* Sequence delimitation item (FFFE,E0DD): nothing to record. */
      if ((((unsigned int) group << 16) | element) == 0xFFFEE0DD)
        {
          if (data != (unsigned char *) NULL)
            data=(unsigned char *) RelinquishMagickMemory(data);
          continue;
        }
      switch (group)
      {
        case 0x0002:
{
          switch (element)
          {
            case 0x0010:
            {
              char
                transfer_syntax[MagickPathExtent];

              /*
                Transfer Syntax.
              */
              if ((datum == 0) && (explicit_retry == MagickFalse))
                {
                  /* Zero-length syntax: rewind and retry as explicit VR. */
                  explicit_retry=MagickTrue;
                  (void) SeekBlob(image,(MagickOffsetType) 0,SEEK_SET);
                  group=0;
                  element=0;
                  if (image_info->verbose != MagickFalse)
                    (void) FormatLocaleFile(stdout,
                      "Corrupted image - trying explicit format\n");
                  break;
                }
              *transfer_syntax='\0';
              if (data != (unsigned char *) NULL)
                (void) CopyMagickString(transfer_syntax,(char *) data,
                  MagickPathExtent);
              if (image_info->verbose != MagickFalse)
                (void) FormatLocaleFile(stdout,"transfer_syntax=%s\n",
                  (const char *) transfer_syntax);
              /* UIDs 1.2.840.10008.1.2.*: .1 = LE, .2 = BE, .4.x = JPEG
                 family, .5 = RLE. */
              if (strncmp(transfer_syntax,"1.2.840.10008.1.2",17) == 0)
                {
                  int
                    subtype,
                    type;

                  type=1;
                  subtype=0;
                  if (strlen(transfer_syntax) > 17)
                    {
                      count=(ssize_t) sscanf(transfer_syntax+17,".%d.%d",&type,
                        &subtype);
                      if (count < 1)
                        ThrowDCMException(CorruptImageError,
                          "ImproperImageHeader");
                    }
                  switch (type)
                  {
                    case 1:
                    {
                      image->endian=LSBEndian;
                      break;
                    }
                    case 2:
                    {
                      image->endian=MSBEndian;
                      break;
                    }
                    case 4:
                    {
                      if ((subtype >= 80) && (subtype <= 81))
                        image->compression=JPEGCompression;
                      else
                        if ((subtype >= 90) && (subtype <= 93))
                          image->compression=JPEG2000Compression;
                        else
                          image->compression=JPEGCompression;
                      break;
                    }
                    case 5:
                    {
                      image->compression=RLECompression;
                      break;
                    }
                  }
                }
              break;
            }
            default:
              break;
          }
          break;
        }
        case 0x0028:
        {
          switch (element)
          {
            case 0x0002:
            {
              /*
                Samples per pixel.
              */
              info.samples_per_pixel=(size_t) datum;
              if ((info.samples_per_pixel == 0) ||
                  (info.samples_per_pixel > 4))
                ThrowDCMException(CorruptImageError,"ImproperImageHeader");
              break;
            }
            case 0x0004:
            {
              /*
                Photometric interpretation.
              */
              if (data == (unsigned char *) NULL)
                break;
              for (i=0; i < (ssize_t) MagickMin(length,MagickPathExtent-1); i++)
                photometric[i]=(char) data[i];
              photometric[i]='\0';
              info.polarity=LocaleCompare(photometric,"MONOCHROME1 ") == 0 ?
                MagickTrue : MagickFalse;
              break;
            }
            case 0x0006:
            {
              /*
                Planar configuration.
              */
              if (datum == 1)
                image->interlace=PlaneInterlace;
              break;
            }
            case 0x0008:
            {
              /*
                Number of frames.
              */
              if (data == (unsigned char *) NULL)
                break;
              number_scenes=StringToUnsignedLong((char *) data);
              break;
            }
            case 0x0010:
            {
              /*
                Image rows.
              */
              height=(size_t) datum;
              break;
            }
            case 0x0011:
            {
              /*
                Image columns.
              */
              width=(size_t) datum;
              break;
            }
            case 0x0100:
            {
              /*
                Bits allocated.
              */
              info.bits_allocated=(size_t) datum;
              info.bytes_per_pixel=1;
              if (datum > 8)
                info.bytes_per_pixel=2;
              info.depth=info.bits_allocated;
              if ((info.depth == 0) || (info.depth > 32))
                ThrowDCMException(CorruptImageError,"ImproperImageHeader");
              info.max_value=(1UL << info.bits_allocated)-1;
              image->depth=info.depth;
              break;
            }
            case 0x0101:
            {
              /*
                Bits stored.
              */
              info.significant_bits=(size_t) datum;
              info.bytes_per_pixel=1;
              if (info.significant_bits > 8)
                info.bytes_per_pixel=2;
              info.depth=info.significant_bits;
              if ((info.depth == 0) || (info.depth > 16))
                ThrowDCMException(CorruptImageError,"ImproperImageHeader");
              info.max_value=(1UL << info.significant_bits)-1;
              info.mask=(size_t) GetQuantumRange(info.significant_bits);
              image->depth=info.depth;
              break;
            }
            case 0x0102:
            {
              /*
                High bit.
              */
              break;
            }
            case 0x0103:
            {
              /*
                Pixel representation.
              */
              info.signed_data=(size_t) datum;
              break;
            }
            case 0x1050:
            {
              /*
                Visible pixel range: center.
              */
              if (data != (unsigned char *) NULL)
                info.window_center=StringToDouble((char *) data,(char **) NULL);
              break;
            }
            case 0x1051:
            {
              /*
                Visible pixel range: width.
              */
              if (data != (unsigned char *) NULL)
                info.window_width=StringToDouble((char *) data,(char **) NULL);
              break;
            }
            case 0x1052:
            {
              /*
                Rescale intercept
              */
              if (data != (unsigned char *) NULL)
                info.rescale_intercept=StringToDouble((char *) data,
                  (char **) NULL);
              break;
            }
            case 0x1053:
            {
              /*
                Rescale slope
              */
              if (data != (unsigned char *) NULL)
                info.rescale_slope=StringToDouble((char *) data,(char **) NULL);
              break;
            }
            case 0x1200:
            case 0x3006:
            {
              /*
                Populate graymap.
*/
              if (data == (unsigned char *) NULL)
                break;
              colors=(size_t) (length/info.bytes_per_pixel);
              datum=(int) colors;
              if (graymap != (int *) NULL)
                graymap=(int *) RelinquishMagickMemory(graymap);
              /* Over-allocate to 65536 entries so any 16-bit index is safe. */
              graymap=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
                sizeof(*graymap));
              if (graymap == (int *) NULL)
                ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
              (void) memset(graymap,0,MagickMax(colors,65536)*
                sizeof(*graymap));
              for (i=0; i < (ssize_t) colors; i++)
                if (info.bytes_per_pixel == 1)
                  graymap[i]=(int) data[i];
                else
                  graymap[i]=(int) ((short *) data)[i];
              break;
            }
            case 0x1201:
            {
              unsigned short
                index;

              /*
                Populate redmap.
              */
              if (data == (unsigned char *) NULL)
                break;
              colors=(size_t) (length/2);
              datum=(int) colors;
              if (redmap != (int *) NULL)
                redmap=(int *) RelinquishMagickMemory(redmap);
              redmap=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
                sizeof(*redmap));
              if (redmap == (int *) NULL)
                ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
              (void) memset(redmap,0,MagickMax(colors,65536)*
                sizeof(*redmap));
              p=data;
              for (i=0; i < (ssize_t) colors; i++)
              {
                if (image->endian == MSBEndian)
                  index=(unsigned short) ((*p << 8) | *(p+1));
                else
                  index=(unsigned short) (*p | (*(p+1) << 8));
                redmap[i]=(int) index;
                p+=2;
              }
              break;
            }
            case 0x1202:
            {
              unsigned short
                index;

              /*
                Populate greenmap.
              */
              if (data == (unsigned char *) NULL)
                break;
              colors=(size_t) (length/2);
              datum=(int) colors;
              if (greenmap != (int *) NULL)
                greenmap=(int *) RelinquishMagickMemory(greenmap);
              greenmap=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
                sizeof(*greenmap));
              if (greenmap == (int *) NULL)
                ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
              (void) memset(greenmap,0,MagickMax(colors,65536)*
                sizeof(*greenmap));
              p=data;
              for (i=0; i < (ssize_t) colors; i++)
              {
                if (image->endian == MSBEndian)
                  index=(unsigned short) ((*p << 8) | *(p+1));
                else
                  index=(unsigned short) (*p | (*(p+1) << 8));
                greenmap[i]=(int) index;
                p+=2;
              }
              break;
            }
            case 0x1203:
            {
              unsigned short
                index;

              /*
                Populate bluemap.
              */
              if (data == (unsigned char *) NULL)
                break;
              colors=(size_t) (length/2);
              datum=(int) colors;
              if (bluemap != (int *) NULL)
                bluemap=(int *) RelinquishMagickMemory(bluemap);
              bluemap=(int *) AcquireQuantumMemory(MagickMax(colors,65536),
                sizeof(*bluemap));
              if (bluemap == (int *) NULL)
                ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
              (void) memset(bluemap,0,MagickMax(colors,65536)*
                sizeof(*bluemap));
              p=data;
              for (i=0; i < (ssize_t) colors; i++)
              {
                if (image->endian == MSBEndian)
                  index=(unsigned short) ((*p << 8) | *(p+1));
                else
                  index=(unsigned short) (*p | (*(p+1) << 8));
                bluemap[i]=(int) index;
                p+=2;
              }
              break;
            }
            default:
              break;
          }
          break;
        }
        case 0x2050:
        {
          switch (element)
          {
            case 0x0020:
            {
              /* Presentation LUT shape INVERSE flips polarity. */
              if ((data != (unsigned char *) NULL) &&
                  (strncmp((char *) data,"INVERSE",7) == 0))
                info.polarity=MagickTrue;
              break;
            }
            default:
              break;
          }
          break;
        }
        default:
          break;
      }
      if (data != (unsigned char *) NULL)
        {
          char
            *attribute;

          /* Record printable element values as "dcm:<description>"
             image properties. */
          for (i=0; dicom_info[i].description != (char *) NULL; i++)
            if ((group == dicom_info[i].group) &&
                (element == dicom_info[i].element))
              break;
          if (dicom_info[i].description != (char *) NULL)
            {
              attribute=AcquireString("dcm:");
              (void) ConcatenateString(&attribute,dicom_info[i].description);
              for (i=0; i < (ssize_t) MagickMax(length,4); i++)
                if (isprint((int) data[i]) == MagickFalse)
break;
              if ((i == (ssize_t) length) || (length > 4))
                {
                  (void) SubstituteString(&attribute," ","");
                  (void) SetImageProperty(image,attribute,(char *) data,
                    exception);
                }
              attribute=DestroyString(attribute);
            }
        }
      if (image_info->verbose != MagickFalse)
        {
          if (data == (unsigned char *) NULL)
            (void) FormatLocaleFile(stdout,"%d\n",datum);
          else
            {
              /*
                Display group data.
              */
              for (i=0; i < (ssize_t) MagickMax(length,4); i++)
                if (isprint((int) data[i]) == MagickFalse)
                  break;
              if ((i != (ssize_t) length) && (length <= 4))
                {
                  ssize_t
                    j;

                  /* Short binary payload: print as a little-endian int. */
                  datum=0;
                  for (j=(ssize_t) length-1; j >= 0; j--)
                    datum=(256*datum+data[j]);
                  (void) FormatLocaleFile(stdout,"%d",datum);
                }
              else
                for (i=0; i < (ssize_t) length; i++)
                  if (isprint((int) data[i]) != MagickFalse)
                    (void) FormatLocaleFile(stdout,"%c",data[i]);
                  else
                    (void) FormatLocaleFile(stdout,"%c",'.');
              (void) FormatLocaleFile(stdout,"\n");
            }
        }
      if (data != (unsigned char *) NULL)
        data=(unsigned char *) RelinquishMagickMemory(data);
      if (EOFBlob(image) != MagickFalse)
        {
          ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
            image->filename);
          break;
        }
    }
    if ((group == 0xfffc) && (element == 0xfffc))
      {
        /* Data-set trailing padding: drop the placeholder image. */
        Image
          *last;

        last=RemoveLastImageFromList(&image);
        if (last != (Image *) NULL)
          last=DestroyImage(last);
        break;
      }
    if ((width == 0) || (height == 0))
      ThrowDCMException(CorruptImageError,"ImproperImageHeader");
    image->columns=(size_t) width;
    image->rows=(size_t) height;
    /* NOTE(review): initialization used ~0UL but the comparison is 0xffff;
       on LP64 the default never matches this test -- confirm intent. */
    if (info.signed_data == 0xffff)
      info.signed_data=(size_t) (info.significant_bits == 16 ? 1 : 0);
    if ((image->compression == JPEGCompression) ||
        (image->compression == JPEG2000Compression))
      {
        Image
          *images;

        ImageInfo
          *read_info;

        int
          c;

        /*
          Read offset table.
*/
        for (i=0; i < (ssize_t) stream_info->remaining; i++)
          if (ReadBlobByte(image) == EOF)
            break;
        /* Skip the item tag, then read the basic offset table length. */
        (void) (((ssize_t) ReadBlobLSBShort(image) << 16) |
          ReadBlobLSBShort(image));
        length=(size_t) ReadBlobLSBLong(image);
        if (length > (size_t) GetBlobSize(image))
          ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile");
        stream_info->offset_count=length >> 2;
        if (stream_info->offset_count != 0)
          {
            if (stream_info->offsets != (ssize_t *) NULL)
              stream_info->offsets=(ssize_t *) RelinquishMagickMemory(
                stream_info->offsets);
            stream_info->offsets=(ssize_t *) AcquireQuantumMemory(
              stream_info->offset_count,sizeof(*stream_info->offsets));
            if (stream_info->offsets == (ssize_t *) NULL)
              ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
            for (i=0; i < (ssize_t) stream_info->offset_count; i++)
              stream_info->offsets[i]=(ssize_t) ReadBlobLSBSignedLong(image);
            /* Offsets are relative to the first fragment: rebase them. */
            offset=TellBlob(image);
            for (i=0; i < (ssize_t) stream_info->offset_count; i++)
              stream_info->offsets[i]+=offset;
          }
        /*
          Handle non-native image formats: copy each encapsulated JPEG /
          JPEG-2000 fragment to a temporary file and delegate decoding.
        */
        read_info=CloneImageInfo(image_info);
        SetImageInfoBlob(read_info,(void *) NULL,0);
        images=NewImageList();
        for (scene=0; scene < (ssize_t) number_scenes; scene++)
        {
          char
            filename[MagickPathExtent];

          const char
            *property;

          FILE
            *file;

          Image
            *jpeg_image;

          int
            unique_file;

          unsigned int
            tag;

          tag=((unsigned int) ReadBlobLSBShort(image) << 16) |
            ReadBlobLSBShort(image);
          length=(size_t) ReadBlobLSBLong(image);
          if (tag == 0xFFFEE0DD)
            break; /* sequence delimiter tag */
          if (tag != 0xFFFEE000)
            {
              read_info=DestroyImageInfo(read_info);
              ThrowDCMException(CorruptImageError,"ImproperImageHeader");
            }
          file=(FILE *) NULL;
          unique_file=AcquireUniqueFileResource(filename);
          if (unique_file != -1)
            file=fdopen(unique_file,"wb");
          if (file == (FILE *) NULL)
            {
              (void) RelinquishUniqueFileResource(filename);
              ThrowFileException(exception,FileOpenError,
                "UnableToCreateTemporaryFile",filename);
              break;
            }
          for (c=EOF; length != 0; length--)
          {
            c=ReadBlobByte(image);
            if (c == EOF)
              {
                ThrowFileException(exception,CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
                break;
              }
            (void) fputc(c,file);
          }
          (void) fclose(file);
          if (c == EOF)
            break;
          (void) FormatLocaleString(read_info->filename,MagickPathExtent,
            "jpeg:%s",filename);
          if (image->compression == JPEG2000Compression)
            (void) FormatLocaleString(read_info->filename,MagickPathExtent,
              "j2k:%s",filename);
          jpeg_image=ReadImage(read_info,exception);
          if (jpeg_image != (Image *) NULL)
            {
              /* Carry the dcm:* properties over to the delegate image. */
              ResetImagePropertyIterator(image);
              property=GetNextImageProperty(image);
              while (property != (const char *) NULL)
              {
                (void) SetImageProperty(jpeg_image,property,
                  GetImageProperty(image,property,exception),exception);
                property=GetNextImageProperty(image);
              }
              AppendImageToList(&images,jpeg_image);
            }
          (void) RelinquishUniqueFileResource(filename);
        }
        read_info=DestroyImageInfo(read_info);
        /* Delegate path owns the result list: free everything else. */
        if (stream_info->offsets != (ssize_t *) NULL)
          stream_info->offsets=(ssize_t *)
            RelinquishMagickMemory(stream_info->offsets);
        stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info);
        if
(info.scale != (Quantum *) NULL)
          info.scale=(Quantum *) RelinquishMagickMemory(info.scale);
        if (graymap != (int *) NULL)
          graymap=(int *) RelinquishMagickMemory(graymap);
        if (bluemap != (int *) NULL)
          bluemap=(int *) RelinquishMagickMemory(bluemap);
        if (greenmap != (int *) NULL)
          greenmap=(int *) RelinquishMagickMemory(greenmap);
        if (redmap != (int *) NULL)
          redmap=(int *) RelinquishMagickMemory(redmap);
        image=DestroyImageList(image);
        return(GetFirstImageInList(images));
      }
    if (info.depth != (1UL*MAGICKCORE_QUANTUM_DEPTH))
      {
        QuantumAny
          range;

        /*
          Compute pixel scaling table.
        */
        length=(size_t) (GetQuantumRange(info.depth)+1);
        if (length > (size_t) GetBlobSize(image))
          ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile");
        if (info.scale != (Quantum *) NULL)
          info.scale=(Quantum *) RelinquishMagickMemory(info.scale);
        info.scale=(Quantum *) AcquireQuantumMemory(MagickMax(length,256),
          sizeof(*info.scale));
        if (info.scale == (Quantum *) NULL)
          ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
        (void) memset(info.scale,0,MagickMax(length,256)*
          sizeof(*info.scale));
        range=GetQuantumRange(info.depth);
        for (i=0; i <= (ssize_t) GetQuantumRange(info.depth); i++)
          info.scale[i]=ScaleAnyToQuantum((size_t) i,range);
      }
    if (image->compression == RLECompression)
      {
        unsigned int
          tag;

        /*
          Read RLE offset table.
*/
        for (i=0; i < (ssize_t) stream_info->remaining; i++)
        {
          int
            c;

          c=ReadBlobByte(image);
          if (c == EOF)
            break;
        }
        tag=((unsigned int) ReadBlobLSBShort(image) << 16) |
          ReadBlobLSBShort(image);
        (void) tag;
        length=(size_t) ReadBlobLSBLong(image);
        if (length > (size_t) GetBlobSize(image))
          ThrowDCMException(CorruptImageError,"InsufficientImageDataInFile");
        stream_info->offset_count=length >> 2;
        if (stream_info->offset_count != 0)
          {
            stream_info->offsets=(ssize_t *) AcquireQuantumMemory(
              stream_info->offset_count,sizeof(*stream_info->offsets));
            if (stream_info->offsets == (ssize_t *) NULL)
              ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
            for (i=0; i < (ssize_t) stream_info->offset_count; i++)
            {
              stream_info->offsets[i]=(ssize_t) ReadBlobLSBSignedLong(image);
              if (EOFBlob(image) != MagickFalse)
                break;
            }
            /* +8 skips the fragment item tag+length that follow. */
            offset=TellBlob(image)+8;
            for (i=0; i < (ssize_t) stream_info->offset_count; i++)
              stream_info->offsets[i]+=offset;
          }
      }
    for (scene=0; scene < (ssize_t) number_scenes; scene++)
    {
      if (image_info->ping != MagickFalse)
        break;
      image->columns=(size_t) width;
      image->rows=(size_t) height;
      image->depth=info.depth;
      status=SetImageExtent(image,image->columns,image->rows,exception);
      if (status == MagickFalse)
        break;
      image->colorspace=RGBColorspace;
      (void) SetImageBackgroundColor(image,exception);
      if ((image->colormap == (PixelInfo *) NULL) &&
          (info.samples_per_pixel == 1))
        {
          int
            index;

          size_t
            one;

          /* Build the colormap from whatever LUTs were supplied. */
          one=1;
          if (colors == 0)
            colors=one << info.depth;
          if (AcquireImageColormap(image,colors,exception) == MagickFalse)
            ThrowDCMException(ResourceLimitError,"MemoryAllocationFailed");
          if (redmap != (int *) NULL)
            for (i=0; i < (ssize_t) colors; i++)
            {
              index=redmap[i];
              if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
                  (index <= (int) info.max_value))
                index=(int) info.scale[index];
              image->colormap[i].red=(MagickRealType) index;
            }
          if (greenmap != (int *) NULL)
            for (i=0; i < (ssize_t) colors; i++)
            {
              index=greenmap[i];
              if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
                  (index <= (int) info.max_value))
                index=(int) info.scale[index];
              image->colormap[i].green=(MagickRealType) index;
            }
          if (bluemap != (int *) NULL)
            for (i=0; i < (ssize_t) colors; i++)
            {
              index=bluemap[i];
              if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
                  (index <= (int) info.max_value))
                index=(int) info.scale[index];
              image->colormap[i].blue=(MagickRealType) index;
            }
          if (graymap != (int *) NULL)
            for (i=0; i < (ssize_t) colors; i++)
            {
              index=graymap[i];
              if ((info.scale != (Quantum *) NULL) && (index >= 0) &&
                  (index <= (int) info.max_value))
                index=(int) info.scale[index];
              image->colormap[i].red=(MagickRealType) index;
              image->colormap[i].green=(MagickRealType) index;
              image->colormap[i].blue=(MagickRealType) index;
            }
        }
      if (image->compression == RLECompression)
        {
          unsigned int
            tag;

          /*
            Read RLE segment table.
          */
          for (i=0; i < (ssize_t) stream_info->remaining; i++)
          {
            int
              c;

            c=ReadBlobByte(image);
            if (c == EOF)
              break;
          }
          tag=((unsigned int) ReadBlobLSBShort(image) << 16) |
            ReadBlobLSBShort(image);
          stream_info->remaining=(size_t) ReadBlobLSBLong(image);
          /* Each fragment starts with a 64-byte segment header. */
          if ((tag != 0xFFFEE000) || (stream_info->remaining <= 64) ||
              (EOFBlob(image) != MagickFalse))
            {
              if (stream_info->offsets != (ssize_t *) NULL)
                stream_info->offsets=(ssize_t *)
                  RelinquishMagickMemory(stream_info->offsets);
              ThrowDCMException(CorruptImageError,"ImproperImageHeader");
            }
          stream_info->count=0;
          stream_info->segment_count=ReadBlobLSBLong(image);
          for (i=0; i < 15; i++)
            stream_info->segments[i]=(ssize_t) ReadBlobLSBSignedLong(image);
          stream_info->remaining-=64;
          if (stream_info->segment_count > 1)
            {
              /* Multi-segment RLE stores high/low bytes separately. */
              info.bytes_per_pixel=1;
              info.depth=8;
              if (stream_info->offset_count > 0)
                (void) SeekBlob(image,(MagickOffsetType)
                  stream_info->offsets[0]+stream_info->segments[0],SEEK_SET);
            }
        }
      if ((info.samples_per_pixel > 1) &&
          (image->interlace == PlaneInterlace))
        {
          register ssize_t
            x;

          register Quantum
            *q;

          ssize_t
            y;

          /*
            Convert Planar RGB DCM Medical image to pixel packets.
*/
          for (i=0; i < (ssize_t) info.samples_per_pixel; i++)
          {
            /* One full image pass per plane (R, then G, B, A). */
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
              if (q == (Quantum *) NULL)
                break;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                switch ((int) i)
                {
                  case 0:
                  {
                    SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                      ReadDCMByte(stream_info,image)),q);
                    break;
                  }
                  case 1:
                  {
                    SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                      ReadDCMByte(stream_info,image)),q);
                    break;
                  }
                  case 2:
                  {
                    SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                      ReadDCMByte(stream_info,image)),q);
                    break;
                  }
                  case 3:
                  {
                    SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                      ReadDCMByte(stream_info,image)),q);
                    break;
                  }
                  default:
                    break;
                }
                q+=GetPixelChannels(image);
              }
              if (SyncAuthenticPixels(image,exception) == MagickFalse)
                break;
              if (image->previous == (Image *) NULL)
                {
                  status=SetImageProgress(image,LoadImageTag,
                    (MagickOffsetType) y,image->rows);
                  if (status == MagickFalse)
                    break;
                }
            }
          }
        }
      else
        {
          const char
            *option;

          /*
            Convert DCM Medical image to pixel packets.
          */
          option=GetImageOption(image_info,"dcm:display-range");
          if (option != (const char *) NULL)
            {
              if (LocaleCompare(option,"reset") == 0)
                info.window_width=0;
            }
          option=GetImageOption(image_info,"dcm:window");
          if (option != (char *) NULL)
            {
              GeometryInfo
                geometry_info;

              MagickStatusType
                flags;

              /* "center x width" override from the command line. */
              flags=ParseGeometry(option,&geometry_info);
              if (flags & RhoValue)
                info.window_center=geometry_info.rho;
              if (flags & SigmaValue)
                info.window_width=geometry_info.sigma;
              info.rescale=MagickTrue;
            }
          option=GetImageOption(image_info,"dcm:rescale");
          if (option != (char *) NULL)
            info.rescale=IsStringTrue(option);
          if ((info.window_center != 0) && (info.window_width == 0))
            info.window_width=info.window_center;
          status=ReadDCMPixels(image,&info,stream_info,MagickTrue,exception);
          if ((status != MagickFalse) && (stream_info->segment_count > 1))
            {
              /* Second RLE segment carries the high bytes. */
              if (stream_info->offset_count > 0)
                (void) SeekBlob(image,(MagickOffsetType)
                  stream_info->offsets[0]+stream_info->segments[1],SEEK_SET);
              (void) ReadDCMPixels(image,&info,stream_info,MagickFalse,
                exception);
            }
        }
      if (SetImageGray(image,exception) != MagickFalse)
        (void) SetImageColorspace(image,GRAYColorspace,exception);
      if (EOFBlob(image) != MagickFalse)
        {
          ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
            image->filename);
          break;
        }
      /*
        Proceed to next image.
      */
      if (image_info->number_scenes != 0)
        if (image->scene >= (image_info->scene+image_info->number_scenes-1))
          break;
      if (scene < (ssize_t) (number_scenes-1))
        {
          /*
            Allocate next image structure.
          */
          AcquireNextImage(image_info,image,exception);
          if (GetNextImageInList(image) == (Image *) NULL)
            {
              status=MagickFalse;
              break;
            }
          image=SyncNextImageInList(image);
          status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
            GetBlobSize(image));
          if (status == MagickFalse)
            break;
        }
    }
    if (TellBlob(image) < (MagickOffsetType) GetBlobSize(image))
      {
        /*
          Allocate next image structure.
*/ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { status=MagickFalse; break; } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } /* Free resources. */ if (stream_info->offsets != (ssize_t *) NULL) stream_info->offsets=(ssize_t *) RelinquishMagickMemory(stream_info->offsets); stream_info=(DCMStreamInfo *) RelinquishMagickMemory(stream_info); if (info.scale != (Quantum *) NULL) info.scale=(Quantum *) RelinquishMagickMemory(info.scale); if (graymap != (int *) NULL) graymap=(int *) RelinquishMagickMemory(graymap); if (bluemap != (int *) NULL) bluemap=(int *) RelinquishMagickMemory(bluemap); if (greenmap != (int *) NULL) greenmap=(int *) RelinquishMagickMemory(greenmap); if (redmap != (int *) NULL) redmap=(int *) RelinquishMagickMemory(redmap); (void) CloseBlob(image); if (status == MagickFalse) return(DestroyImageList(image)); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D C M I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDCMImage() adds attributes for the DCM image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterDCMImage method is: % % size_t RegisterDCMImage(void) % */ ModuleExport size_t RegisterDCMImage(void) { MagickInfo *entry; static const char *DCMNote= { "DICOM is used by the medical community for images like X-rays. The\n" "specification, \"Digital Imaging and Communications in Medicine\n" "(DICOM)\", is available at http://medical.nema.org/. 
In particular,\n"
    "see part 5 which describes the image encoding (RLE, JPEG, JPEG-LS),\n"
    "and supplement 61 which adds JPEG-2000 encoding."
  };

  entry=AcquireMagickInfo("DCM","DCM",
    "Digital Imaging and Communications in Medicine image");
  /* Decoder only: no encoder handler is registered for DCM here. */
  entry->decoder=(DecodeImageHandler *) ReadDCMImage;
  entry->magick=(IsImageFormatHandler *) IsDCM;
  /* XOR toggles CoderAdjoinFlag — presumably set by default by
     AcquireMagickInfo, so this clears it; TODO confirm default flags. */
  entry->flags^=CoderAdjoinFlag;
  /* The reader seeks within the blob, so a seekable stream is required. */
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->note=ConstantString(DCMNote);
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   U n r e g i s t e r D C M I m a g e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterDCMImage() removes the format registrations made by the DCM
%  module from the list of supported formats.
%
%  The format of the UnregisterDCMImage method is:
%
%      UnregisterDCMImage(void)
%
*/
ModuleExport void UnregisterDCMImage(void)
{
  (void) UnregisterMagickInfo("DCM");
}
./CrossVul/dataset_final_sorted/CWE-20/c/bad_364_1
crossvul-cpp_data_bad_5115_2
/* packet-rpcap.c * * Routines for RPCAP message formats. * * Copyright 2008, Stig Bjorlykke <stig@bjorlykke.org>, Thales Norway AS * * Wireshark - Network traffic analyzer * By Gerald Combs <gerald@wireshark.org> * Copyright 1998 Gerald Combs * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "config.h" #include <epan/packet.h> #include <epan/aftypes.h> #include <epan/prefs.h> #include <epan/to_str.h> #include <epan/expert.h> #include <wiretap/wtap.h> #include "packet-frame.h" #include "packet-tcp.h" #define PNAME "Remote Packet Capture" #define PSNAME "RPCAP" #define PFNAME "rpcap" #define RPCAP_MSG_ERROR 1 #define RPCAP_MSG_FINDALLIF_REQ 2 #define RPCAP_MSG_OPEN_REQ 3 #define RPCAP_MSG_STARTCAP_REQ 4 #define RPCAP_MSG_UPDATEFILTER_REQ 5 #define RPCAP_MSG_CLOSE 6 #define RPCAP_MSG_PACKET 7 #define RPCAP_MSG_AUTH_REQ 8 #define RPCAP_MSG_STATS_REQ 9 #define RPCAP_MSG_ENDCAP_REQ 10 #define RPCAP_MSG_SETSAMPLING_REQ 11 #define RPCAP_MSG_FINDALLIF_REPLY (128+RPCAP_MSG_FINDALLIF_REQ) #define RPCAP_MSG_OPEN_REPLY (128+RPCAP_MSG_OPEN_REQ) #define RPCAP_MSG_STARTCAP_REPLY (128+RPCAP_MSG_STARTCAP_REQ) #define RPCAP_MSG_UPDATEFILTER_REPLY (128+RPCAP_MSG_UPDATEFILTER_REQ) #define RPCAP_MSG_AUTH_REPLY (128+RPCAP_MSG_AUTH_REQ) #define RPCAP_MSG_STATS_REPLY (128+RPCAP_MSG_STATS_REQ) #define RPCAP_MSG_ENDCAP_REPLY 
(128+RPCAP_MSG_ENDCAP_REQ) #define RPCAP_MSG_SETSAMPLING_REPLY (128+RPCAP_MSG_SETSAMPLING_REQ) #define RPCAP_ERR_NETW 1 #define RPCAP_ERR_INITTIMEOUT 2 #define RPCAP_ERR_AUTH 3 #define RPCAP_ERR_FINDALLIF 4 #define RPCAP_ERR_NOREMOTEIF 5 #define RPCAP_ERR_OPEN 6 #define RPCAP_ERR_UPDATEFILTER 7 #define RPCAP_ERR_GETSTATS 8 #define RPCAP_ERR_READEX 9 #define RPCAP_ERR_HOSTNOAUTH 10 #define RPCAP_ERR_REMOTEACCEPT 11 #define RPCAP_ERR_STARTCAPTURE 12 #define RPCAP_ERR_ENDCAPTURE 13 #define RPCAP_ERR_RUNTIMETIMEOUT 14 #define RPCAP_ERR_SETSAMPLING 15 #define RPCAP_ERR_WRONGMSG 16 #define RPCAP_ERR_WRONGVER 17 #define RPCAP_SAMP_NOSAMP 0 #define RPCAP_SAMP_1_EVERY_N 1 #define RPCAP_SAMP_FIRST_AFTER_N_MS 2 #define RPCAP_RMTAUTH_NULL 0 #define RPCAP_RMTAUTH_PWD 1 #define FLAG_PROMISC 0x0001 #define FLAG_DGRAM 0x0002 #define FLAG_SERVEROPEN 0x0004 #define FLAG_INBOUND 0x0008 #define FLAG_OUTBOUND 0x0010 void proto_register_rpcap (void); void proto_reg_handoff_rpcap (void); static int proto_rpcap = -1; static int hf_version = -1; static int hf_type = -1; static int hf_value = -1; static int hf_plen = -1; static int hf_error = -1; static int hf_error_value = -1; static int hf_packet = -1; static int hf_timestamp = -1; static int hf_caplen = -1; static int hf_len = -1; static int hf_npkt = -1; static int hf_auth_request = -1; static int hf_auth_type = -1; static int hf_auth_slen1 = -1; static int hf_auth_slen2 = -1; static int hf_auth_username = -1; static int hf_auth_password = -1; static int hf_open_request = -1; static int hf_open_reply = -1; static int hf_linktype = -1; static int hf_tzoff = -1; static int hf_startcap_request = -1; static int hf_snaplen = -1; static int hf_read_timeout = -1; static int hf_flags = -1; static int hf_flags_promisc = -1; static int hf_flags_dgram = -1; static int hf_flags_serveropen = -1; static int hf_flags_inbound = -1; static int hf_flags_outbound = -1; static int hf_client_port = -1; static int hf_startcap_reply = -1; static int 
hf_bufsize = -1; static int hf_server_port = -1; static int hf_dummy = -1; static int hf_filter = -1; static int hf_filtertype = -1; static int hf_nitems = -1; static int hf_filterbpf_insn = -1; static int hf_code = -1; static int hf_code_class = -1; static int hf_code_fields = -1; static int hf_code_ld_size = -1; static int hf_code_ld_mode = -1; static int hf_code_alu_op = -1; static int hf_code_jmp_op = -1; static int hf_code_src = -1; static int hf_code_rval = -1; static int hf_code_misc_op = -1; static int hf_jt = -1; static int hf_jf = -1; static int hf_instr_value = -1; static int hf_stats_reply = -1; static int hf_ifrecv = -1; static int hf_ifdrop = -1; static int hf_krnldrop = -1; static int hf_srvcapt = -1; static int hf_findalldevs_reply = -1; static int hf_findalldevs_if = -1; static int hf_namelen = -1; static int hf_desclen = -1; static int hf_if_flags = -1; static int hf_naddr = -1; static int hf_if_name = -1; static int hf_if_desc = -1; static int hf_findalldevs_ifaddr = -1; static int hf_if_addr = -1; static int hf_if_netmask = -1; static int hf_if_broadaddr = -1; static int hf_if_dstaddr = -1; static int hf_if_af = -1; static int hf_if_port = -1; static int hf_if_ip = -1; static int hf_if_padding = -1; static int hf_if_unknown = -1; static int hf_sampling_request = -1; static int hf_sampling_method = -1; static int hf_sampling_dummy1 = -1; static int hf_sampling_dummy2 = -1; static int hf_sampling_value = -1; static gint ett_rpcap = -1; static gint ett_error = -1; static gint ett_packet = -1; static gint ett_auth_request = -1; static gint ett_open_reply = -1; static gint ett_startcap_request = -1; static gint ett_startcap_reply = -1; static gint ett_startcap_flags = -1; static gint ett_filter = -1; static gint ett_filterbpf_insn = -1; static gint ett_filterbpf_insn_code = -1; static gint ett_stats_reply = -1; static gint ett_findalldevs_reply = -1; static gint ett_findalldevs_if = -1; static gint ett_findalldevs_ifaddr = -1; static gint ett_ifaddr 
= -1; static gint ett_sampling_request = -1; static expert_field ei_error = EI_INIT; static expert_field ei_if_unknown = EI_INIT; static expert_field ei_no_more_data = EI_INIT; static expert_field ei_caplen_too_big = EI_INIT; static dissector_handle_t data_handle; /* User definable values */ static gboolean rpcap_desegment = TRUE; static gboolean decode_content = TRUE; static guint32 global_linktype = WTAP_ENCAP_UNKNOWN; /* Global variables */ static guint32 linktype = WTAP_ENCAP_UNKNOWN; static gboolean info_added = FALSE; static const true_false_string open_closed = { "Open", "Closed" }; static const value_string message_type[] = { { RPCAP_MSG_ERROR, "Error" }, { RPCAP_MSG_FINDALLIF_REQ, "Find all interfaces request" }, { RPCAP_MSG_OPEN_REQ, "Open request" }, { RPCAP_MSG_STARTCAP_REQ, "Start capture request" }, { RPCAP_MSG_UPDATEFILTER_REQ, "Update filter request" }, { RPCAP_MSG_CLOSE, "Close" }, { RPCAP_MSG_PACKET, "Packet" }, { RPCAP_MSG_AUTH_REQ, "Authentication request" }, { RPCAP_MSG_STATS_REQ, "Statistics request" }, { RPCAP_MSG_ENDCAP_REQ, "End capture request" }, { RPCAP_MSG_SETSAMPLING_REQ, "Set sampling request" }, { RPCAP_MSG_FINDALLIF_REPLY, "Find all interfaces reply" }, { RPCAP_MSG_OPEN_REPLY, "Open reply" }, { RPCAP_MSG_STARTCAP_REPLY, "Start capture reply" }, { RPCAP_MSG_UPDATEFILTER_REPLY, "Update filter reply" }, { RPCAP_MSG_AUTH_REPLY, "Authentication reply" }, { RPCAP_MSG_STATS_REPLY, "Statistics reply" }, { RPCAP_MSG_ENDCAP_REPLY, "End capture reply" }, { RPCAP_MSG_SETSAMPLING_REPLY, "Set sampling reply" }, { 0, NULL } }; static const value_string error_codes[] = { { RPCAP_ERR_NETW, "Network error" }, { RPCAP_ERR_INITTIMEOUT, "Initial timeout has expired" }, { RPCAP_ERR_AUTH, "Authentication error" }, { RPCAP_ERR_FINDALLIF, "Generic findalldevs error" }, { RPCAP_ERR_NOREMOTEIF, "No remote interfaces" }, { RPCAP_ERR_OPEN, "Generic pcap_open error" }, { RPCAP_ERR_UPDATEFILTER, "Generic updatefilter error" }, { RPCAP_ERR_GETSTATS, "Generic 
pcap_stats error" }, { RPCAP_ERR_READEX, "Generic pcap_next_ex error" }, { RPCAP_ERR_HOSTNOAUTH, "The host is not authorized" }, { RPCAP_ERR_REMOTEACCEPT, "Generic pcap_remoteaccept error" }, { RPCAP_ERR_STARTCAPTURE, "Generic pcap_startcapture error" }, { RPCAP_ERR_ENDCAPTURE, "Generic pcap_endcapture error" }, { RPCAP_ERR_RUNTIMETIMEOUT, "Runtime timeout has expired" }, { RPCAP_ERR_SETSAMPLING, "Error in setting sampling parameters" }, { RPCAP_ERR_WRONGMSG, "Unrecognized message" }, { RPCAP_ERR_WRONGVER, "Incompatible version" }, { 0, NULL } }; static const value_string sampling_method[] = { { RPCAP_SAMP_NOSAMP, "No sampling" }, { RPCAP_SAMP_1_EVERY_N, "1 every N" }, { RPCAP_SAMP_FIRST_AFTER_N_MS, "First after N ms" }, { 0, NULL } }; static const value_string auth_type[] = { { RPCAP_RMTAUTH_NULL, "None" }, { RPCAP_RMTAUTH_PWD, "Password" }, { 0, NULL } }; static const value_string address_family[] = { { COMMON_AF_UNSPEC, "AF_UNSPEC" }, { COMMON_AF_INET, "AF_INET" }, { 0, NULL } }; static const value_string bpf_class[] = { { 0x00, "ld" }, { 0x01, "ldx" }, { 0x02, "st" }, { 0x03, "stx" }, { 0x04, "alu" }, { 0x05, "jmp" }, { 0x06, "ret" }, { 0x07, "misc" }, { 0, NULL } }; static const value_string bpf_size[] = { { 0x00, "w" }, { 0x01, "h" }, { 0x02, "b" }, { 0, NULL } }; static const value_string bpf_mode[] = { { 0x00, "imm" }, { 0x01, "abs" }, { 0x02, "ind" }, { 0x03, "mem" }, { 0x04, "len" }, { 0x05, "msh" }, { 0, NULL } }; static const value_string bpf_alu_op[] = { { 0x00, "add" }, { 0x01, "sub" }, { 0x02, "mul" }, { 0x03, "div" }, { 0x04, "or" }, { 0x05, "and" }, { 0x06, "lsh" }, { 0x07, "rsh" }, { 0x08, "neg" }, { 0, NULL } }; static const value_string bpf_jmp_op[] = { { 0x00, "ja" }, { 0x01, "jeq" }, { 0x02, "jgt" }, { 0x03, "jge" }, { 0x04, "jset" }, { 0, NULL } }; static const value_string bpf_src[] = { { 0x00, "k" }, { 0x01, "x" }, { 0, NULL } }; static const value_string bpf_rval[] = { { 0x00, "k" }, { 0x01, "x" }, { 0x02, "a" }, { 0, NULL } }; static 
const value_string bpf_misc_op[] = {
  { 0x00, "tax" },
  { 0x10, "txa" },
  { 0,    NULL  }
};


/*
 * Frame-end callback: clears the "RPCAP already indicated in the
 * columns" flag so the next frame gets the column prefix again
 * (set in dissect_rpcap_packet via register_frame_end_routine).
 */
static void rpcap_frame_end (void)
{
  info_added = FALSE;
}


/*
 * Dissect an RPCAP error message: the remainder of the tvb is the
 * error text, shown in the Info column and flagged as an expert info.
 */
static void dissect_rpcap_error (tvbuff_t *tvb, packet_info *pinfo,
                                 proto_tree *parent_tree, gint offset)
{
  proto_item *ti;
  gint len;

  len = tvb_captured_length_remaining (tvb, offset);
  if (len <= 0)
    return;

  col_append_fstr (pinfo->cinfo, COL_INFO, ": %s",
                   tvb_format_text_wsp (tvb, offset, len));

  ti = proto_tree_add_item (parent_tree, hf_error, tvb, offset, len, ENC_ASCII|ENC_NA);
  expert_add_info_format(pinfo, ti, &ei_error, "Error: %s",
                         tvb_format_text_wsp (tvb, offset, len));
}


/*
 * Dissect one fixed-size (128-byte) interface-address record:
 * 2-byte address family, then for AF_INET a port, IPv4 address and
 * 120 bytes of padding; any other family is shown as 126 unknown
 * bytes (with an expert warning unless the family is AF_UNSPEC).
 * Returns the offset just past the record.
 */
static gint dissect_rpcap_ifaddr (tvbuff_t *tvb, packet_info *pinfo,
                                  proto_tree *parent_tree, gint offset,
                                  int hf_id, proto_item *parent_item)
{
  proto_tree *tree;
  proto_item *ti;
  gchar ipaddr[MAX_ADDR_STR_LEN];
  guint32 ipv4;
  guint16 af;

  ti = proto_tree_add_item (parent_tree, hf_id, tvb, offset, 128, ENC_BIG_ENDIAN);
  tree = proto_item_add_subtree (ti, ett_ifaddr);

  af = tvb_get_ntohs (tvb, offset);
  proto_tree_add_item (tree, hf_if_af, tvb, offset, 2, ENC_BIG_ENDIAN);
  offset += 2;

  if (af == COMMON_AF_INET) {
    proto_tree_add_item (tree, hf_if_port, tvb, offset, 2, ENC_BIG_ENDIAN);
    offset += 2;

    ipv4 = tvb_get_ipv4 (tvb, offset);
    ip_to_str_buf((guint8 *)&ipv4, ipaddr, MAX_ADDR_STR_LEN);
    /* Show the address on this item and (for the first address of an
       interface) on the parent findalldevs item as well. */
    proto_item_append_text (ti, ": %s", ipaddr);
    if (parent_item) {
      proto_item_append_text (parent_item, ": %s", ipaddr);
    }
    proto_tree_add_item (tree, hf_if_ip, tvb, offset, 4, ENC_BIG_ENDIAN);
    offset += 4;

    /* 2 + 2 + 4 + 120 = 128 bytes total for the record. */
    proto_tree_add_item (tree, hf_if_padding, tvb, offset, 120, ENC_NA);
    offset += 120;
  } else {
    ti = proto_tree_add_item (tree, hf_if_unknown, tvb, offset, 126, ENC_NA);
    if (af != COMMON_AF_UNSPEC) {
      expert_add_info_format(pinfo, ti, &ei_if_unknown,
                             "Unknown address family: %d", af);
    }
    offset += 126;
  }

  return offset;
}


/*
 * Dissect the four address records (addr, netmask, broadcast,
 * destination) attached to one interface in a findalldevs reply.
 */
static gint dissect_rpcap_findalldevs_ifaddr (tvbuff_t *tvb, packet_info *pinfo _U_,
                                              proto_tree *parent_tree, gint offset)
{
  proto_tree *tree;
  proto_item *ti;
gint boffset = offset; ti = proto_tree_add_item (parent_tree, hf_findalldevs_ifaddr, tvb, offset, -1, ENC_NA); tree = proto_item_add_subtree (ti, ett_findalldevs_ifaddr); offset = dissect_rpcap_ifaddr (tvb, pinfo, tree, offset, hf_if_addr, ti); offset = dissect_rpcap_ifaddr (tvb, pinfo, tree, offset, hf_if_netmask, NULL); offset = dissect_rpcap_ifaddr (tvb, pinfo, tree, offset, hf_if_broadaddr, NULL); offset = dissect_rpcap_ifaddr (tvb, pinfo, tree, offset, hf_if_dstaddr, NULL); proto_item_set_len (ti, offset - boffset); return offset; } static gint dissect_rpcap_findalldevs_if (tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *parent_tree, gint offset) { proto_tree *tree; proto_item *ti; guint16 namelen, desclen, naddr, i; gint boffset = offset; ti = proto_tree_add_item (parent_tree, hf_findalldevs_if, tvb, offset, -1, ENC_NA); tree = proto_item_add_subtree (ti, ett_findalldevs_if); namelen = tvb_get_ntohs (tvb, offset); proto_tree_add_item (tree, hf_namelen, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; desclen = tvb_get_ntohs (tvb, offset); proto_tree_add_item (tree, hf_desclen, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; proto_tree_add_item (tree, hf_if_flags, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; naddr = tvb_get_ntohs (tvb, offset); proto_tree_add_item (tree, hf_naddr, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; proto_tree_add_item (tree, hf_dummy, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; if (namelen) { proto_item_append_text (ti, ": %s", tvb_get_string_enc(wmem_packet_scope(), tvb, offset, namelen, ENC_ASCII)); proto_tree_add_item (tree, hf_if_name, tvb, offset, namelen, ENC_ASCII|ENC_NA); offset += namelen; } if (desclen) { proto_tree_add_item (tree, hf_if_desc, tvb, offset, desclen, ENC_ASCII|ENC_NA); offset += desclen; } for (i = 0; i < naddr; i++) { offset = dissect_rpcap_findalldevs_ifaddr (tvb, pinfo, tree, offset); if (tvb_reported_length_remaining (tvb, offset) < 0) { /* No more data in packet */ expert_add_info(pinfo, ti, 
&ei_no_more_data); break; } } proto_item_set_len (ti, offset - boffset); return offset; } static void dissect_rpcap_findalldevs_reply (tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *parent_tree, gint offset, guint16 no_devs) { proto_tree *tree; proto_item *ti; guint16 i; ti = proto_tree_add_item (parent_tree, hf_findalldevs_reply, tvb, offset, -1, ENC_NA); tree = proto_item_add_subtree (ti, ett_findalldevs_reply); for (i = 0; i < no_devs; i++) { offset = dissect_rpcap_findalldevs_if (tvb, pinfo, tree, offset); if (tvb_reported_length_remaining (tvb, offset) < 0) { /* No more data in packet */ expert_add_info(pinfo, ti, &ei_no_more_data); break; } } proto_item_append_text (ti, ", %d item%s", no_devs, plurality (no_devs, "", "s")); } static gint dissect_rpcap_filterbpf_insn (tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *parent_tree, gint offset) { proto_tree *tree, *code_tree; proto_item *ti, *code_ti; guint8 inst_class; ti = proto_tree_add_item (parent_tree, hf_filterbpf_insn, tvb, offset, 8, ENC_NA); tree = proto_item_add_subtree (ti, ett_filterbpf_insn); code_ti = proto_tree_add_item (tree, hf_code, tvb, offset, 2, ENC_BIG_ENDIAN); code_tree = proto_item_add_subtree (code_ti, ett_filterbpf_insn_code); proto_tree_add_item (code_tree, hf_code_class, tvb, offset, 2, ENC_BIG_ENDIAN); inst_class = tvb_get_guint8 (tvb, offset + 1) & 0x07; proto_item_append_text (ti, ": %s", val_to_str_const (inst_class, bpf_class, "")); switch (inst_class) { case 0x00: /* ld */ case 0x01: /* ldx */ proto_tree_add_item (code_tree, hf_code_ld_size, tvb, offset, 2, ENC_BIG_ENDIAN); proto_tree_add_item (code_tree, hf_code_ld_mode, tvb, offset, 2, ENC_BIG_ENDIAN); break; case 0x04: /* alu */ proto_tree_add_item (code_tree, hf_code_src, tvb, offset, 2, ENC_BIG_ENDIAN); proto_tree_add_item (code_tree, hf_code_alu_op, tvb, offset, 2, ENC_BIG_ENDIAN); break; case 0x05: /* jmp */ proto_tree_add_item (code_tree, hf_code_src, tvb, offset, 2, ENC_BIG_ENDIAN); proto_tree_add_item (code_tree, 
hf_code_jmp_op, tvb, offset, 2, ENC_BIG_ENDIAN); break; case 0x06: /* ret */ proto_tree_add_item (code_tree, hf_code_rval, tvb, offset, 2, ENC_BIG_ENDIAN); break; case 0x07: /* misc */ proto_tree_add_item (code_tree, hf_code_misc_op, tvb, offset, 2, ENC_BIG_ENDIAN); break; default: proto_tree_add_item (code_tree, hf_code_fields, tvb, offset, 2, ENC_BIG_ENDIAN); break; } offset += 2; proto_tree_add_item (tree, hf_jt, tvb, offset, 1, ENC_BIG_ENDIAN); offset += 1; proto_tree_add_item (tree, hf_jf, tvb, offset, 1, ENC_BIG_ENDIAN); offset += 1; proto_tree_add_item (tree, hf_instr_value, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; return offset; } static void dissect_rpcap_filter (tvbuff_t *tvb, packet_info *pinfo, proto_tree *parent_tree, gint offset) { proto_tree *tree; proto_item *ti; guint32 nitems, i; ti = proto_tree_add_item (parent_tree, hf_filter, tvb, offset, -1, ENC_NA); tree = proto_item_add_subtree (ti, ett_filter); proto_tree_add_item (tree, hf_filtertype, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; proto_tree_add_item (tree, hf_dummy, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; nitems = tvb_get_ntohl (tvb, offset); proto_tree_add_item (tree, hf_nitems, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; for (i = 0; i < nitems; i++) { offset = dissect_rpcap_filterbpf_insn (tvb, pinfo, tree, offset); if (tvb_reported_length_remaining (tvb, offset) < 0) { /* No more data in packet */ expert_add_info(pinfo, ti, &ei_no_more_data); break; } } } static int dissect_rpcap_auth_request (tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *parent_tree, gint offset) { proto_tree *tree; proto_item *ti; guint16 type, slen1, slen2; ti = proto_tree_add_item (parent_tree, hf_auth_request, tvb, offset, -1, ENC_NA); tree = proto_item_add_subtree (ti, ett_auth_request); type = tvb_get_ntohs (tvb, offset); proto_tree_add_item (tree, hf_auth_type, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; proto_tree_add_item (tree, hf_dummy, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; slen1 
= tvb_get_ntohs (tvb, offset);
  proto_tree_add_item (tree, hf_auth_slen1, tvb, offset, 2, ENC_BIG_ENDIAN);
  offset += 2;

  slen2 = tvb_get_ntohs (tvb, offset);
  proto_tree_add_item (tree, hf_auth_slen2, tvb, offset, 2, ENC_BIG_ENDIAN);
  offset += 2;

  if (type == RPCAP_RMTAUTH_NULL) {
    proto_item_append_text (ti, " (none)");
  } else if (type == RPCAP_RMTAUTH_PWD) {
    guint8 *username, *password;

    /* NOTE(review): rpcap password auth is cleartext on the wire; the
       credentials are also echoed into the item text below. */
    username = tvb_get_string_enc(wmem_packet_scope(), tvb, offset, slen1, ENC_ASCII);
    proto_tree_add_item (tree, hf_auth_username, tvb, offset, slen1, ENC_ASCII|ENC_NA);
    offset += slen1;

    password = tvb_get_string_enc(wmem_packet_scope(), tvb, offset, slen2, ENC_ASCII);
    proto_tree_add_item (tree, hf_auth_password, tvb, offset, slen2, ENC_ASCII|ENC_NA);
    offset += slen2;

    proto_item_append_text (ti, " (%s/%s)", username, password);
  }

  return offset;
}


/*
 * Dissect an open request: the payload is simply the ASCII name of
 * the interface to open.
 */
static void dissect_rpcap_open_request (tvbuff_t *tvb, packet_info *pinfo _U_,
                                        proto_tree *parent_tree, gint offset)
{
  gint len;

  len = tvb_captured_length_remaining (tvb, offset);
  proto_tree_add_item (parent_tree, hf_open_request, tvb, offset, len, ENC_ASCII|ENC_NA);
}


/*
 * Dissect an open reply: link-layer type and timezone offset.
 * The link type is remembered in the file-scope 'linktype' so that
 * subsequent packet messages can be handed to a subdissector.
 * NOTE(review): the on-the-wire value here is a pcap DLT_* code, but
 * 'linktype' is elsewhere compared against WTAP_ENCAP_UNKNOWN and used
 * to look up wtap_encap_dissector_table — confirm whether a
 * DLT -> WTAP_ENCAP conversion is missing here.
 */
static void dissect_rpcap_open_reply (tvbuff_t *tvb, packet_info *pinfo _U_,
                                      proto_tree *parent_tree, gint offset)
{
  proto_tree *tree;
  proto_item *ti;

  ti = proto_tree_add_item (parent_tree, hf_open_reply, tvb, offset, -1, ENC_NA);
  tree = proto_item_add_subtree (ti, ett_open_reply);

  linktype = tvb_get_ntohl (tvb, offset);
  proto_tree_add_item (tree, hf_linktype, tvb, offset, 4, ENC_BIG_ENDIAN);
  offset += 4;

  proto_tree_add_item (tree, hf_tzoff, tvb, offset, 4, ENC_BIG_ENDIAN);
}


/*
 * Dissect a start-capture request: snaplen, read timeout, capture
 * flags (decoded bit by bit below), client port and the BPF filter.
 */
static void dissect_rpcap_startcap_request (tvbuff_t *tvb, packet_info *pinfo,
                                            proto_tree *parent_tree, gint offset)
{
  proto_tree *tree, *field_tree;
  proto_item *ti, *field_ti;
  guint16 flags;

  ti = proto_tree_add_item (parent_tree, hf_startcap_request, tvb, offset, -1, ENC_NA);
  tree = proto_item_add_subtree (ti, ett_startcap_request);

  proto_tree_add_item (tree, hf_snaplen, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4; proto_tree_add_item (tree, hf_read_timeout, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; flags = tvb_get_ntohs (tvb, offset); field_ti = proto_tree_add_uint_format (tree, hf_flags, tvb, offset, 2, flags, "Flags"); field_tree = proto_item_add_subtree (field_ti, ett_startcap_flags); proto_tree_add_item (field_tree, hf_flags_promisc, tvb, offset, 2, ENC_BIG_ENDIAN); proto_tree_add_item (field_tree, hf_flags_dgram, tvb, offset, 2, ENC_BIG_ENDIAN); proto_tree_add_item (field_tree, hf_flags_serveropen, tvb, offset, 2, ENC_BIG_ENDIAN); proto_tree_add_item (field_tree, hf_flags_inbound, tvb, offset, 2, ENC_BIG_ENDIAN); proto_tree_add_item (field_tree, hf_flags_outbound, tvb, offset, 2, ENC_BIG_ENDIAN); if (flags & 0x1F) { gchar *flagstr = wmem_strdup_printf (wmem_packet_scope(), "%s%s%s%s%s", (flags & FLAG_PROMISC) ? ", Promiscuous" : "", (flags & FLAG_DGRAM) ? ", Datagram" : "", (flags & FLAG_SERVEROPEN) ? ", ServerOpen" : "", (flags & FLAG_INBOUND) ? ", Inbound" : "", (flags & FLAG_OUTBOUND) ? 
", Outbound" : ""); proto_item_append_text (field_ti, ":%s", &flagstr[1]); } else { proto_item_append_text (field_ti, " (none)"); } offset += 2; proto_tree_add_item (tree, hf_client_port, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; dissect_rpcap_filter (tvb, pinfo, tree, offset); } static void dissect_rpcap_startcap_reply (tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *parent_tree, gint offset) { proto_tree *tree; proto_item *ti; ti = proto_tree_add_item (parent_tree, hf_startcap_reply, tvb, offset, -1, ENC_NA); tree = proto_item_add_subtree (ti, ett_startcap_reply); proto_tree_add_item (tree, hf_bufsize, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; proto_tree_add_item (tree, hf_server_port, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; proto_tree_add_item (tree, hf_dummy, tvb, offset, 2, ENC_BIG_ENDIAN); } static void dissect_rpcap_stats_reply (tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *parent_tree, gint offset) { proto_tree *tree; proto_item *ti; ti = proto_tree_add_item (parent_tree, hf_stats_reply, tvb, offset, 16, ENC_NA); tree = proto_item_add_subtree (ti, ett_stats_reply); proto_tree_add_item (tree, hf_ifrecv, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; proto_tree_add_item (tree, hf_ifdrop, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; proto_tree_add_item (tree, hf_krnldrop, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; proto_tree_add_item (tree, hf_srvcapt, tvb, offset, 4, ENC_BIG_ENDIAN); } static int dissect_rpcap_sampling_request (tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *parent_tree, gint offset) { proto_tree *tree; proto_item *ti; guint32 value; guint8 method; ti = proto_tree_add_item (parent_tree, hf_sampling_request, tvb, offset, -1, ENC_NA); tree = proto_item_add_subtree (ti, ett_sampling_request); method = tvb_get_guint8 (tvb, offset); proto_tree_add_item (tree, hf_sampling_method, tvb, offset, 1, ENC_BIG_ENDIAN); offset += 1; proto_tree_add_item (tree, hf_sampling_dummy1, tvb, offset, 1, ENC_BIG_ENDIAN); offset += 1; 
proto_tree_add_item (tree, hf_sampling_dummy2, tvb, offset, 2, ENC_BIG_ENDIAN); offset += 2; value = tvb_get_ntohl (tvb, offset); proto_tree_add_item (tree, hf_sampling_value, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; switch (method) { case RPCAP_SAMP_NOSAMP: proto_item_append_text (ti, ": None"); break; case RPCAP_SAMP_1_EVERY_N: proto_item_append_text (ti, ": 1 every %d", value); break; case RPCAP_SAMP_FIRST_AFTER_N_MS: proto_item_append_text (ti, ": First after %d ms", value); break; default: break; } return offset; } static void dissect_rpcap_packet (tvbuff_t *tvb, packet_info *pinfo, proto_tree *top_tree, proto_tree *parent_tree, gint offset, proto_item *top_item) { proto_tree *tree; proto_item *ti; nstime_t ts; tvbuff_t *new_tvb; guint caplen, len, frame_no; gint reported_length_remaining; ti = proto_tree_add_item (parent_tree, hf_packet, tvb, offset, 20, ENC_NA); tree = proto_item_add_subtree (ti, ett_packet); ts.secs = tvb_get_ntohl (tvb, offset); ts.nsecs = tvb_get_ntohl (tvb, offset + 4) * 1000; proto_tree_add_time(tree, hf_timestamp, tvb, offset, 8, &ts); offset += 8; caplen = tvb_get_ntohl (tvb, offset); ti = proto_tree_add_item (tree, hf_caplen, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; len = tvb_get_ntohl (tvb, offset); proto_tree_add_item (tree, hf_len, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; frame_no = tvb_get_ntohl (tvb, offset); proto_tree_add_item (tree, hf_npkt, tvb, offset, 4, ENC_BIG_ENDIAN); offset += 4; proto_item_append_text (ti, ", Frame %u", frame_no); proto_item_append_text (top_item, " Frame %u", frame_no); /* * reported_length_remaining should not be -1, as offset is at * most right past the end of the available data in the packet. 
*/
  reported_length_remaining = tvb_reported_length_remaining (tvb, offset);
  /* Reject capture lengths that claim more data than the packet holds;
     this guards the tvb_new_subset() below against a malicious caplen. */
  if (caplen > (guint)reported_length_remaining) {
    expert_add_info(pinfo, ti, &ei_caplen_too_big);
    return;
  }

  new_tvb = tvb_new_subset (tvb, offset, caplen, len);
  if (decode_content && linktype != WTAP_ENCAP_UNKNOWN) {
    /* NOTE(review): 'linktype' was taken verbatim from the open reply
       (a DLT_* value) but is used here as a WTAP_ENCAP table key —
       confirm the mapping; mismatched values pick the wrong dissector. */
    dissector_try_uint(wtap_encap_dissector_table, linktype, new_tvb, pinfo, top_tree);

    if (!info_added) {
      /* Only indicate when not added before */
      /* Indicate RPCAP in the protocol column */
      col_prepend_fence_fstr(pinfo->cinfo, COL_PROTOCOL, "R|");

      /* Indicate RPCAP in the info column */
      col_prepend_fence_fstr (pinfo->cinfo, COL_INFO, "Remote | ");

      info_added = TRUE;
      register_frame_end_routine(pinfo, rpcap_frame_end);
    }
  } else {
    if (linktype == WTAP_ENCAP_UNKNOWN) {
      proto_item_append_text (ti, ", Unknown link-layer type");
    }
    call_dissector (data_handle, new_tvb, pinfo, top_tree);
  }
}


/*
 * Main RPCAP dissector: decode the 8-byte common header (version,
 * message type, 16-bit value, 32-bit payload length) and dispatch to
 * the per-message-type dissector.
 */
static int dissect_rpcap (tvbuff_t *tvb, packet_info *pinfo,
                          proto_tree *top_tree, void* data _U_)
{
  proto_tree *tree;
  proto_item *ti;
  tvbuff_t *new_tvb;
  gint len, offset = 0;
  guint8 msg_type;
  guint16 msg_value;

  col_set_str (pinfo->cinfo, COL_PROTOCOL, PSNAME);
  col_clear(pinfo->cinfo, COL_INFO);

  ti = proto_tree_add_item (top_tree, proto_rpcap, tvb, offset, -1, ENC_NA);
  tree = proto_item_add_subtree (ti, ett_rpcap);

  proto_tree_add_item (tree, hf_version, tvb, offset, 1, ENC_BIG_ENDIAN);
  offset++;

  msg_type = tvb_get_guint8 (tvb, offset);
  proto_tree_add_item (tree, hf_type, tvb, offset, 1, ENC_BIG_ENDIAN);
  offset++;

  col_append_fstr (pinfo->cinfo, COL_INFO, "%s",
                   val_to_str (msg_type, message_type, "Unknown: %d"));
  proto_item_append_text (ti, ", %s",
                          val_to_str (msg_type, message_type, "Unknown: %d"));

  /* The 16-bit value field is an error code for error messages and a
     message-specific value (e.g. interface count) otherwise. */
  msg_value = tvb_get_ntohs (tvb, offset);
  if (msg_type == RPCAP_MSG_ERROR) {
    proto_tree_add_item (tree, hf_error_value, tvb, offset, 2, ENC_BIG_ENDIAN);
  } else {
    proto_tree_add_item (tree, hf_value, tvb, offset, 2, ENC_BIG_ENDIAN);
  }
  offset += 2;

  proto_tree_add_item (tree, hf_plen, tvb, offset, 4,
ENC_BIG_ENDIAN); offset += 4; switch (msg_type) { case RPCAP_MSG_ERROR: dissect_rpcap_error (tvb, pinfo, tree, offset); break; case RPCAP_MSG_OPEN_REQ: dissect_rpcap_open_request (tvb, pinfo, tree, offset); break; case RPCAP_MSG_STARTCAP_REQ: dissect_rpcap_startcap_request (tvb, pinfo, tree, offset); break; case RPCAP_MSG_UPDATEFILTER_REQ: dissect_rpcap_filter (tvb, pinfo, tree, offset); break; case RPCAP_MSG_PACKET: proto_item_set_len (ti, 28); dissect_rpcap_packet (tvb, pinfo, top_tree, tree, offset, ti); break; case RPCAP_MSG_AUTH_REQ: dissect_rpcap_auth_request (tvb, pinfo, tree, offset); break; case RPCAP_MSG_SETSAMPLING_REQ: dissect_rpcap_sampling_request (tvb, pinfo, tree, offset); break; case RPCAP_MSG_FINDALLIF_REPLY: dissect_rpcap_findalldevs_reply (tvb, pinfo, tree, offset, msg_value); break; case RPCAP_MSG_OPEN_REPLY: dissect_rpcap_open_reply (tvb, pinfo, tree, offset); break; case RPCAP_MSG_STARTCAP_REPLY: dissect_rpcap_startcap_reply (tvb, pinfo, tree, offset); break; case RPCAP_MSG_STATS_REPLY: dissect_rpcap_stats_reply (tvb, pinfo, tree, offset); break; default: len = tvb_reported_length_remaining (tvb, offset); if (len) { /* Yet unknown, dump as data */ proto_item_set_len (ti, 8); new_tvb = tvb_new_subset_remaining (tvb, offset); call_dissector (data_handle, new_tvb, pinfo, top_tree); } break; } return tvb_captured_length(tvb); } static gboolean check_rpcap_heur (tvbuff_t *tvb, gboolean tcp) { gint offset = 0; guint8 version, msg_type; guint16 msg_value; guint32 plen, len, caplen; if (tvb_captured_length (tvb) < 8) /* Too short */ return FALSE; version = tvb_get_guint8 (tvb, offset); if (version != 0) /* Incorrect version */ return FALSE; offset++; msg_type = tvb_get_guint8 (tvb, offset); if (!tcp && msg_type != 7) { /* UDP is only used for packets */ return FALSE; } if (try_val_to_str(msg_type, message_type) == NULL) /* Unknown message type */ return FALSE; offset++; msg_value = tvb_get_ntohs (tvb, offset); if (msg_value > 0) { if (msg_type == 
RPCAP_MSG_ERROR) {
      /* Must have a valid error code */
      if (try_val_to_str(msg_value, error_codes) == NULL)
        return FALSE;
    } else if (msg_type != RPCAP_MSG_FINDALLIF_REPLY) {
      /* Only error and findalldevs-reply carry a non-zero value field. */
      return FALSE;
    }
  }
  offset += 2;

  plen = tvb_get_ntohl (tvb, offset);
  offset += 4;
  len = (guint32) tvb_reported_length_remaining (tvb, offset);

  /* Cross-check the advertised payload length against the message
     type's known size constraints before accepting the heuristic. */
  switch (msg_type) {

  case RPCAP_MSG_FINDALLIF_REQ:
  case RPCAP_MSG_UPDATEFILTER_REPLY:
  case RPCAP_MSG_AUTH_REPLY:
  case RPCAP_MSG_STATS_REQ:
  case RPCAP_MSG_CLOSE:
  case RPCAP_MSG_SETSAMPLING_REPLY:
  case RPCAP_MSG_ENDCAP_REQ:
  case RPCAP_MSG_ENDCAP_REPLY:
    /* Empty payload */
    if (plen != 0 || len != 0)
      return FALSE;
    break;

  case RPCAP_MSG_OPEN_REPLY:
  case RPCAP_MSG_STARTCAP_REPLY:
  case RPCAP_MSG_SETSAMPLING_REQ:
    /* Always 8 bytes */
    if (plen != 8 || len != 8)
      return FALSE;
    break;

  case RPCAP_MSG_STATS_REPLY:
    /* Always 16 bytes */
    if (plen != 16 || len != 16)
      return FALSE;
    break;

  case RPCAP_MSG_PACKET:
    /* Must have the frame header */
    if (plen < 20)
      return FALSE;

    /* Check if capture length is valid */
    caplen = tvb_get_ntohl (tvb, offset+8);
    /* Always 20 bytes less than packet length */
    if (caplen != (plen - 20) || caplen > 65535)
      return FALSE;
    break;

  case RPCAP_MSG_FINDALLIF_REPLY:
  case RPCAP_MSG_ERROR:
  case RPCAP_MSG_OPEN_REQ:
  case RPCAP_MSG_STARTCAP_REQ:
  case RPCAP_MSG_UPDATEFILTER_REQ:
  case RPCAP_MSG_AUTH_REQ:
    /* Variable length */
    if (plen != len)
      return FALSE;
    break;
  default:
    /* Unknown message type */
    return FALSE;
  }

  return TRUE;
}


/*
 * PDU-length callback for tcp_dissect_pdus(): header length (8) plus
 * the 32-bit payload length at offset 4.
 * NOTE(review): the guint addition can wrap for a hostile plen near
 * 0xFFFFFFFF, yielding a tiny PDU length — confirm tcp_dissect_pdus
 * copes with a result smaller than the fixed header.
 */
static guint get_rpcap_pdu_len (packet_info *pinfo _U_, tvbuff_t *tvb,
                                int offset, void *data _U_)
{
  return tvb_get_ntohl (tvb, offset + 4) + 8;
}


/*
 * TCP heuristic entry point: accept the conversation if the first PDU
 * passes check_rpcap_heur, then hand off to tcp_dissect_pdus for
 * reassembly-aware dissection.
 */
static gboolean dissect_rpcap_heur_tcp (tvbuff_t *tvb, packet_info *pinfo,
                                        proto_tree *tree, void *data)
{
  if (check_rpcap_heur (tvb, TRUE)) {
    /* This is probably a rpcap tcp package */
    tcp_dissect_pdus (tvb, pinfo, tree, rpcap_desegment, 8,
                      get_rpcap_pdu_len, dissect_rpcap, data);

    return TRUE;
  }

  return FALSE;
}


/*
 * UDP heuristic entry point: UDP carries only packet messages
 * (enforced inside check_rpcap_heur), dissected directly.
 */
static gboolean dissect_rpcap_heur_udp (tvbuff_t *tvb, packet_info *pinfo,
proto_tree *tree, void *data) { if (check_rpcap_heur (tvb, FALSE)) { /* This is probably a rpcap udp package */ dissect_rpcap (tvb, pinfo, tree, data); return TRUE; } return FALSE; } void proto_register_rpcap (void) { static hf_register_info hf[] = { /* Common header for all messages */ { &hf_version, { "Version", "rpcap.version", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_type, { "Message type", "rpcap.type", FT_UINT8, BASE_DEC, VALS(message_type), 0x0, NULL, HFILL } }, { &hf_value, { "Message value", "rpcap.value", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_plen, { "Payload length", "rpcap.len", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, /* Error */ { &hf_error, { "Error", "rpcap.error", FT_STRING, BASE_NONE, NULL, 0x0, "Error text", HFILL } }, { &hf_error_value, { "Error value", "rpcap.error_value", FT_UINT16, BASE_DEC, VALS(error_codes), 0x0, NULL, HFILL } }, /* Packet header */ { &hf_packet, { "Packet", "rpcap.packet", FT_NONE, BASE_NONE, NULL, 0x0, "Packet data", HFILL } }, { &hf_timestamp, { "Arrival time", "rpcap.time", FT_ABSOLUTE_TIME, ABSOLUTE_TIME_LOCAL, NULL, 0x0, NULL, HFILL } }, { &hf_caplen, { "Capture length", "rpcap.cap_len", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_len, { "Frame length", "rpcap.len", FT_UINT32, BASE_DEC, NULL, 0x0, "Frame length (off wire)", HFILL } }, { &hf_npkt, { "Frame number", "rpcap.number", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, /* Authentication request */ { &hf_auth_request, { "Authentication", "rpcap.auth", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_auth_type, { "Authentication type", "rpcap.auth_type", FT_UINT16, BASE_DEC, VALS(auth_type), 0x0, NULL, HFILL } }, { &hf_auth_slen1, { "Authentication item length 1", "rpcap.auth_len1", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_auth_slen2, { "Authentication item length 2", "rpcap.auth_len2", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_auth_username, { "Username", "rpcap.username", 
FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_auth_password, { "Password", "rpcap.password", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL } }, /* Open request */ { &hf_open_request, { "Open request", "rpcap.open_request", FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL } }, /* Open reply */ { &hf_open_reply, { "Open reply", "rpcap.open_reply", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_linktype, { "Link type", "rpcap.linktype", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_tzoff, { "Timezone offset", "rpcap.tzoff", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, /* Start capture request */ { &hf_startcap_request, { "Start capture request", "rpcap.startcap_request", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_snaplen, { "Snap length", "rpcap.snaplen", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_read_timeout, { "Read timeout", "rpcap.read_timeout", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_flags, { "Flags", "rpcap.flags", FT_UINT16, BASE_DEC, NULL, 0x0, "Capture flags", HFILL } }, { &hf_flags_promisc, { "Promiscuous mode", "rpcap.flags.promisc", FT_BOOLEAN, 16, TFS(&tfs_enabled_disabled), FLAG_PROMISC, NULL, HFILL } }, { &hf_flags_dgram, { "Use Datagram", "rpcap.flags.dgram", FT_BOOLEAN, 16, TFS(&tfs_yes_no), FLAG_DGRAM, NULL, HFILL } }, { &hf_flags_serveropen, { "Server open", "rpcap.flags.serveropen", FT_BOOLEAN, 16, TFS(&open_closed), FLAG_SERVEROPEN, NULL, HFILL } }, { &hf_flags_inbound, { "Inbound", "rpcap.flags.inbound", FT_BOOLEAN, 16, TFS(&tfs_yes_no), FLAG_INBOUND, NULL, HFILL } }, { &hf_flags_outbound, { "Outbound", "rpcap.flags.outbound", FT_BOOLEAN, 16, TFS(&tfs_yes_no), FLAG_OUTBOUND, NULL, HFILL } }, { &hf_client_port, { "Client Port", "rpcap.client_port", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, /* Start capture reply */ { &hf_startcap_reply, { "Start capture reply", "rpcap.startcap_reply", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_bufsize, { "Buffer size", 
"rpcap.bufsize", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_server_port, { "Server port", "rpcap.server_port", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_dummy, { "Dummy", "rpcap.dummy", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, /* Filter */ { &hf_filter, { "Filter", "rpcap.filter", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_filtertype, { "Filter type", "rpcap.filtertype", FT_UINT16, BASE_DEC, NULL, 0x0, "Filter type (BPF)", HFILL } }, { &hf_nitems, { "Number of items", "rpcap.nitems", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, /* Filter BPF instruction */ { &hf_filterbpf_insn, { "Filter BPF instruction", "rpcap.filterbpf_insn", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_code, { "Op code", "rpcap.opcode", FT_UINT16, BASE_HEX, NULL, 0x0, "Operation code", HFILL } }, { &hf_code_class, { "Class", "rpcap.opcode.class", FT_UINT16, BASE_HEX, VALS(bpf_class), 0x07, "Instruction Class", HFILL } }, { &hf_code_fields, { "Fields", "rpcap.opcode.fields", FT_UINT16, BASE_HEX, NULL, 0xF8, "Class Fields", HFILL } }, { &hf_code_ld_size, { "Size", "rpcap.opcode.size", FT_UINT16, BASE_HEX, VALS(bpf_size), 0x18, NULL, HFILL } }, { &hf_code_ld_mode, { "Mode", "rpcap.opcode.mode", FT_UINT16, BASE_HEX, VALS(bpf_mode), 0xE0, NULL, HFILL } }, { &hf_code_alu_op, { "Op", "rpcap.opcode.aluop", FT_UINT16, BASE_HEX, VALS(bpf_alu_op), 0xF0, NULL, HFILL } }, { &hf_code_jmp_op, { "Op", "rpcap.opcode.jmpop", FT_UINT16, BASE_HEX, VALS(bpf_jmp_op), 0xF0, NULL, HFILL } }, { &hf_code_src, { "Src", "rpcap.opcode.src", FT_UINT16, BASE_HEX, VALS(bpf_src), 0x08, NULL, HFILL } }, { &hf_code_rval, { "Rval", "rpcap.opcode.rval", FT_UINT16, BASE_HEX, VALS(bpf_rval), 0x18, NULL, HFILL } }, { &hf_code_misc_op, { "Op", "rpcap.opcode.miscop", FT_UINT16, BASE_HEX, VALS(bpf_misc_op), 0xF8, NULL, HFILL } }, { &hf_jt, { "JT", "rpcap.jt", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_jf, { "JF", "rpcap.jf", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } 
}, { &hf_instr_value, { "Instruction value", "rpcap.instr_value", FT_UINT32, BASE_DEC, NULL, 0x0, "Instruction-Dependent value", HFILL } }, /* Statistics reply */ { &hf_stats_reply, { "Statistics", "rpcap.stats_reply", FT_NONE, BASE_NONE, NULL, 0x0, "Statistics reply data", HFILL } }, { &hf_ifrecv, { "Received by kernel filter", "rpcap.ifrecv", FT_UINT32, BASE_DEC, NULL, 0x0, "Received by kernel", HFILL } }, { &hf_ifdrop, { "Dropped by network interface", "rpcap.ifdrop", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_krnldrop, { "Dropped by kernel filter", "rpcap.krnldrop", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_srvcapt, { "Captured by rpcapd", "rpcap.srvcapt", FT_UINT32, BASE_DEC, NULL, 0x0, "Captured by RPCAP daemon", HFILL } }, /* Find all devices reply */ { &hf_findalldevs_reply, { "Find all devices", "rpcap.findalldevs_reply", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_findalldevs_if, { "Interface", "rpcap.if", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_namelen, { "Name length", "rpcap.namelen", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_desclen, { "Description length", "rpcap.desclen", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_if_flags, { "Interface flags", "rpcap.if.flags", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_naddr, { "Number of addresses", "rpcap.naddr", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_if_name, { "Name", "rpcap.ifname", FT_STRING, BASE_NONE, NULL, 0x0, "Interface name", HFILL } }, { &hf_if_desc, { "Description", "rpcap.ifdesc", FT_STRING, BASE_NONE, NULL, 0x0, "Interface description", HFILL } }, /* Find all devices / Interface addresses */ { &hf_findalldevs_ifaddr, { "Interface address", "rpcap.ifaddr", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_if_addr, { "Address", "rpcap.addr", FT_NONE, BASE_NONE, NULL, 0x0, "Network address", HFILL } }, { &hf_if_netmask, { "Netmask", "rpcap.netmask", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, 
{ &hf_if_broadaddr, { "Broadcast", "rpcap.broadaddr", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_if_dstaddr, { "P2P destination address", "rpcap.dstaddr", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_if_af, { "Address family", "rpcap.if.af", FT_UINT16, BASE_HEX, VALS(address_family), 0x0, NULL, HFILL } }, { &hf_if_port, { "Port", "rpcap.if.port", FT_UINT16, BASE_DEC, NULL, 0x0, "Port number", HFILL } }, { &hf_if_ip, { "IP address", "rpcap.if.ip", FT_IPv4, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_if_padding, { "Padding", "rpcap.if.padding", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_if_unknown, { "Unknown address", "rpcap.if.unknown", FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL } }, /* Sampling request */ { &hf_sampling_request, { "Sampling", "rpcap.sampling_request", FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL } }, { &hf_sampling_method, { "Method", "rpcap.sampling_method", FT_UINT8, BASE_DEC, VALS(sampling_method), 0x0, "Sampling method", HFILL } }, { &hf_sampling_dummy1, { "Dummy1", "rpcap.dummy", FT_UINT8, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_sampling_dummy2, { "Dummy2", "rpcap.dummy", FT_UINT16, BASE_DEC, NULL, 0x0, NULL, HFILL } }, { &hf_sampling_value, { "Value", "rpcap.sampling_value", FT_UINT32, BASE_DEC, NULL, 0x0, NULL, HFILL } }, }; static gint *ett[] = { &ett_rpcap, &ett_error, &ett_packet, &ett_auth_request, &ett_open_reply, &ett_startcap_request, &ett_startcap_reply, &ett_startcap_flags, &ett_filter, &ett_filterbpf_insn, &ett_filterbpf_insn_code, &ett_stats_reply, &ett_findalldevs_reply, &ett_findalldevs_if, &ett_findalldevs_ifaddr, &ett_ifaddr, &ett_sampling_request }; static ei_register_info ei[] = { { &ei_error, { "rpcap.error.expert", PI_SEQUENCE, PI_NOTE, "Error", EXPFILL }}, { &ei_if_unknown, { "rpcap.if_unknown", PI_SEQUENCE, PI_NOTE, "Unknown address family", EXPFILL }}, { &ei_no_more_data, { "rpcap.no_more_data", PI_MALFORMED, PI_ERROR, "No more data in packet", EXPFILL }}, { &ei_caplen_too_big, { 
"rpcap.caplen_too_big", PI_MALFORMED, PI_ERROR, "Caplen is bigger than the remaining message length", EXPFILL }}, }; module_t *rpcap_module; expert_module_t* expert_rpcap; proto_rpcap = proto_register_protocol (PNAME, PSNAME, PFNAME); new_register_dissector (PFNAME, dissect_rpcap, proto_rpcap); expert_rpcap = expert_register_protocol(proto_rpcap); expert_register_field_array(expert_rpcap, ei, array_length(ei)); proto_register_field_array (proto_rpcap, hf, array_length (hf)); proto_register_subtree_array (ett, array_length (ett)); /* Register our configuration options */ rpcap_module = prefs_register_protocol (proto_rpcap, proto_reg_handoff_rpcap); prefs_register_bool_preference (rpcap_module, "desegment_pdus", "Reassemble PDUs spanning multiple TCP segments", "Whether the RPCAP dissector should reassemble PDUs" " spanning multiple TCP segments." " To use this option, you must also enable \"Allow subdissectors" " to reassemble TCP streams\" in the TCP protocol settings.", &rpcap_desegment); prefs_register_bool_preference (rpcap_module, "decode_content", "Decode content according to link-layer type", "Whether the packets should be decoded according to" " the link-layer type.", &decode_content); prefs_register_uint_preference (rpcap_module, "linktype", "Default link-layer type", "Default link-layer type to use if an Open Reply packet" " has not been received.", 10, &global_linktype); } void proto_reg_handoff_rpcap (void) { static gboolean rpcap_prefs_initialized = FALSE; if (!rpcap_prefs_initialized) { data_handle = find_dissector ("data"); rpcap_prefs_initialized = TRUE; heur_dissector_add ("tcp", dissect_rpcap_heur_tcp, "RPCAP over TCP", "rpcap_tcp", proto_rpcap, HEURISTIC_ENABLE); heur_dissector_add ("udp", dissect_rpcap_heur_udp, "RPCAP over UDP", "rpcap_udp", proto_rpcap, HEURISTIC_ENABLE); } info_added = FALSE; linktype = global_linktype; } /* * Editor modelines - http://www.wireshark.org/tools/modelines.html * * Local Variables: * c-basic-offset: 2 * tab-width: 
8 * indent-tabs-mode: nil * End: * * ex: set shiftwidth=2 tabstop=8 expandtab: * :indentSize=2:tabSize=8:noTabs=true: */
./CrossVul/dataset_final_sorted/CWE-20/c/bad_5115_2
crossvul-cpp_data_good_5844_1
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * "Ping" sockets * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Based on ipv4/udp.c code. * * Authors: Vasiliy Kulikov / Openwall (for Linux 2.6), * Pavel Kankovsky (for Linux 2.4.32) * * Pavel gave all rights to bugs to Vasiliy, * none of the bugs are Pavel's now. * */ #include <linux/uaccess.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <net/snmp.h> #include <net/ip.h> #include <net/icmp.h> #include <net/protocol.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/export.h> #include <net/sock.h> #include <net/ping.h> #include <net/udp.h> #include <net/route.h> #include <net/inet_common.h> #include <net/checksum.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/in6.h> #include <linux/icmpv6.h> #include <net/addrconf.h> #include <net/ipv6.h> #include <net/transp_v6.h> #endif struct ping_table ping_table; struct pingv6_ops pingv6_ops; EXPORT_SYMBOL_GPL(pingv6_ops); static u16 ping_port_rover; static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask) { int res = (num + net_hash_mix(net)) & mask; pr_debug("hash(%d) = %d\n", num, res); return res; } EXPORT_SYMBOL_GPL(ping_hash); static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, struct net *net, unsigned int num) { return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; } int ping_get_port(struct sock *sk, unsigned short ident) { 
struct hlist_nulls_node *node; struct hlist_nulls_head *hlist; struct inet_sock *isk, *isk2; struct sock *sk2 = NULL; isk = inet_sk(sk); write_lock_bh(&ping_table.lock); if (ident == 0) { u32 i; u16 result = ping_port_rover + 1; for (i = 0; i < (1L << 16); i++, result++) { if (!result) result++; /* avoid zero */ hlist = ping_hashslot(&ping_table, sock_net(sk), result); ping_portaddr_for_each_entry(sk2, node, hlist) { isk2 = inet_sk(sk2); if (isk2->inet_num == result) goto next_port; } /* found */ ping_port_rover = ident = result; break; next_port: ; } if (i >= (1L << 16)) goto fail; } else { hlist = ping_hashslot(&ping_table, sock_net(sk), ident); ping_portaddr_for_each_entry(sk2, node, hlist) { isk2 = inet_sk(sk2); /* BUG? Why is this reuse and not reuseaddr? ping.c * doesn't turn off SO_REUSEADDR, and it doesn't expect * that other ping processes can steal its packets. */ if ((isk2->inet_num == ident) && (sk2 != sk) && (!sk2->sk_reuse || !sk->sk_reuse)) goto fail; } } pr_debug("found port/ident = %d\n", ident); isk->inet_num = ident; if (sk_unhashed(sk)) { pr_debug("was not hashed\n"); sock_hold(sk); hlist_nulls_add_head(&sk->sk_nulls_node, hlist); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); } write_unlock_bh(&ping_table.lock); return 0; fail: write_unlock_bh(&ping_table.lock); return 1; } EXPORT_SYMBOL_GPL(ping_get_port); void ping_hash(struct sock *sk) { pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num); BUG(); /* "Please do not press this button again." 
*/ } void ping_unhash(struct sock *sk) { struct inet_sock *isk = inet_sk(sk); pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num); if (sk_hashed(sk)) { write_lock_bh(&ping_table.lock); hlist_nulls_del(&sk->sk_nulls_node); sock_put(sk); isk->inet_num = 0; isk->inet_sport = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); write_unlock_bh(&ping_table.lock); } } EXPORT_SYMBOL_GPL(ping_unhash); static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) { struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident); struct sock *sk = NULL; struct inet_sock *isk; struct hlist_nulls_node *hnode; int dif = skb->dev->ifindex; if (skb->protocol == htons(ETH_P_IP)) { pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", (int)ident, &ip_hdr(skb)->daddr, dif); #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n", (int)ident, &ipv6_hdr(skb)->daddr, dif); #endif } read_lock_bh(&ping_table.lock); ping_portaddr_for_each_entry(sk, hnode, hslot) { isk = inet_sk(sk); pr_debug("iterate\n"); if (isk->inet_num != ident) continue; if (skb->protocol == htons(ETH_P_IP) && sk->sk_family == AF_INET) { pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk, (int) isk->inet_num, &isk->inet_rcv_saddr, sk->sk_bound_dev_if); if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != ip_hdr(skb)->daddr) continue; #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6) && sk->sk_family == AF_INET6) { pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk, (int) isk->inet_num, &sk->sk_v6_rcv_saddr, sk->sk_bound_dev_if); if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) && !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, &ipv6_hdr(skb)->daddr)) continue; #endif } if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) continue; sock_hold(sk); goto exit; } sk = NULL; exit: read_unlock_bh(&ping_table.lock); return sk; } static void inet_get_ping_group_range_net(struct net 
*net, kgid_t *low, kgid_t *high) { kgid_t *data = net->ipv4.sysctl_ping_group_range; unsigned int seq; do { seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock); *low = data[0]; *high = data[1]; } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq)); } int ping_init_sock(struct sock *sk) { struct net *net = sock_net(sk); kgid_t group = current_egid(); struct group_info *group_info = get_current_groups(); int i, j, count = group_info->ngroups; kgid_t low, high; inet_get_ping_group_range_net(net, &low, &high); if (gid_lte(low, group) && gid_lte(group, high)) return 0; for (i = 0; i < group_info->nblocks; i++) { int cp_count = min_t(int, NGROUPS_PER_BLOCK, count); for (j = 0; j < cp_count; j++) { kgid_t gid = group_info->blocks[i][j]; if (gid_lte(low, gid) && gid_lte(gid, high)) return 0; } count -= cp_count; } return -EACCES; } EXPORT_SYMBOL_GPL(ping_init_sock); void ping_close(struct sock *sk, long timeout) { pr_debug("ping_close(sk=%p,sk->num=%u)\n", inet_sk(sk), inet_sk(sk)->inet_num); pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter); sk_common_release(sk); } EXPORT_SYMBOL_GPL(ping_close); /* Checks the bind address and possibly modifies sk->sk_bound_dev_if. 
*/ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk, struct sockaddr *uaddr, int addr_len) { struct net *net = sock_net(sk); if (sk->sk_family == AF_INET) { struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; int chk_addr_ret; if (addr_len < sizeof(*addr)) return -EINVAL; pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port)); chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) chk_addr_ret = RTN_LOCAL; if ((sysctl_ip_nonlocal_bind == 0 && isk->freebind == 0 && isk->transparent == 0 && chk_addr_ret != RTN_LOCAL) || chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) return -EADDRNOTAVAIL; #if IS_ENABLED(CONFIG_IPV6) } else if (sk->sk_family == AF_INET6) { struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr; int addr_type, scoped, has_addr; struct net_device *dev = NULL; if (addr_len < sizeof(*addr)) return -EINVAL; pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n", sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port)); addr_type = ipv6_addr_type(&addr->sin6_addr); scoped = __ipv6_addr_needs_scope_id(addr_type); if ((addr_type != IPV6_ADDR_ANY && !(addr_type & IPV6_ADDR_UNICAST)) || (scoped && !addr->sin6_scope_id)) return -EINVAL; rcu_read_lock(); if (addr->sin6_scope_id) { dev = dev_get_by_index_rcu(net, addr->sin6_scope_id); if (!dev) { rcu_read_unlock(); return -ENODEV; } } has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev, scoped); rcu_read_unlock(); if (!(isk->freebind || isk->transparent || has_addr || addr_type == IPV6_ADDR_ANY)) return -EADDRNOTAVAIL; if (scoped) sk->sk_bound_dev_if = addr->sin6_scope_id; #endif } else { return -EAFNOSUPPORT; } return 0; } static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr) { if (saddr->sa_family == AF_INET) { struct inet_sock *isk = inet_sk(sk); struct sockaddr_in *addr = (struct sockaddr_in *) saddr; isk->inet_rcv_saddr = 
isk->inet_saddr = addr->sin_addr.s_addr; #if IS_ENABLED(CONFIG_IPV6) } else if (saddr->sa_family == AF_INET6) { struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr; struct ipv6_pinfo *np = inet6_sk(sk); sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr; #endif } } static void ping_clear_saddr(struct sock *sk, int dif) { sk->sk_bound_dev_if = dif; if (sk->sk_family == AF_INET) { struct inet_sock *isk = inet_sk(sk); isk->inet_rcv_saddr = isk->inet_saddr = 0; #if IS_ENABLED(CONFIG_IPV6) } else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr)); memset(&np->saddr, 0, sizeof(np->saddr)); #endif } } /* * We need our own bind because there are no privileged id's == local ports. * Moreover, we don't allow binding to multi- and broadcast addresses. */ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *isk = inet_sk(sk); unsigned short snum; int err; int dif = sk->sk_bound_dev_if; err = ping_check_bind_addr(sk, isk, uaddr, addr_len); if (err) return err; lock_sock(sk); err = -EINVAL; if (isk->inet_num != 0) goto out; err = -EADDRINUSE; ping_set_saddr(sk, uaddr); snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port); if (ping_get_port(sk, snum) != 0) { ping_clear_saddr(sk, dif); goto out; } pr_debug("after bind(): num = %d, dif = %d\n", (int)isk->inet_num, (int)sk->sk_bound_dev_if); err = 0; if (sk->sk_family == AF_INET && isk->inet_rcv_saddr) sk->sk_userlocks |= SOCK_BINDADDR_LOCK; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr)) sk->sk_userlocks |= SOCK_BINDADDR_LOCK; #endif if (snum) sk->sk_userlocks |= SOCK_BINDPORT_LOCK; isk->inet_sport = htons(isk->inet_num); isk->inet_daddr = 0; isk->inet_dport = 0; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr)); #endif sk_dst_reset(sk); out: release_sock(sk); pr_debug("ping_v4_bind -> %d\n", err); 
return err; } EXPORT_SYMBOL_GPL(ping_bind); /* * Is this a supported type of ICMP message? */ static inline int ping_supported(int family, int type, int code) { return (family == AF_INET && type == ICMP_ECHO && code == 0) || (family == AF_INET6 && type == ICMPV6_ECHO_REQUEST && code == 0); } /* * This routine is called by the ICMP module when it gets some * sort of error condition. */ void ping_err(struct sk_buff *skb, int offset, u32 info) { int family; struct icmphdr *icmph; struct inet_sock *inet_sock; int type; int code; struct net *net = dev_net(skb->dev); struct sock *sk; int harderr; int err; if (skb->protocol == htons(ETH_P_IP)) { family = AF_INET; type = icmp_hdr(skb)->type; code = icmp_hdr(skb)->code; icmph = (struct icmphdr *)(skb->data + offset); } else if (skb->protocol == htons(ETH_P_IPV6)) { family = AF_INET6; type = icmp6_hdr(skb)->icmp6_type; code = icmp6_hdr(skb)->icmp6_code; icmph = (struct icmphdr *) (skb->data + offset); } else { BUG(); } /* We assume the packet has already been checked by icmp_unreach */ if (!ping_supported(family, icmph->type, icmph->code)) return; pr_debug("ping_err(proto=0x%x,type=%d,code=%d,id=%04x,seq=%04x)\n", skb->protocol, type, code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); if (sk == NULL) { pr_debug("no socket, dropping\n"); return; /* No socket for error */ } pr_debug("err on socket %p\n", sk); err = 0; harderr = 0; inet_sock = inet_sk(sk); if (skb->protocol == htons(ETH_P_IP)) { switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: /* This is not a real error but ping wants to see it. * Report it with some fake errno. 
*/ err = EREMOTEIO; break; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ ipv4_sk_update_pmtu(skb, sk, info); if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) { err = EMSGSIZE; harderr = 1; break; } goto out; } err = EHOSTUNREACH; if (code <= NR_ICMP_UNREACH) { harderr = icmp_err_convert[code].fatal; err = icmp_err_convert[code].errno; } break; case ICMP_REDIRECT: /* See ICMP_SOURCE_QUENCH */ ipv4_sk_redirect(skb, sk); err = EREMOTEIO; break; } #if IS_ENABLED(CONFIG_IPV6) } else if (skb->protocol == htons(ETH_P_IPV6)) { harderr = pingv6_ops.icmpv6_err_convert(type, code, &err); #endif } /* * RFC1122: OK. Passes ICMP errors back to application, as per * 4.1.3.3. */ if ((family == AF_INET && !inet_sock->recverr) || (family == AF_INET6 && !inet6_sk(sk)->recverr)) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; } else { if (family == AF_INET) { ip_icmp_error(sk, skb, err, 0 /* no remote port */, info, (u8 *)icmph); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { pingv6_ops.ipv6_icmp_error(sk, skb, err, 0, info, (u8 *)icmph); #endif } } sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); } EXPORT_SYMBOL_GPL(ping_err); /* * Copy and checksum an ICMP Echo packet from user space into a buffer * starting from the payload. 
*/ int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, struct sk_buff *skb) { struct pingfakehdr *pfh = (struct pingfakehdr *)from; if (offset == 0) { if (fraglen < sizeof(struct icmphdr)) BUG(); if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr), pfh->iov, 0, fraglen - sizeof(struct icmphdr), &pfh->wcheck)) return -EFAULT; } else if (offset < sizeof(struct icmphdr)) { BUG(); } else { if (csum_partial_copy_fromiovecend (to, pfh->iov, offset - sizeof(struct icmphdr), fraglen, &pfh->wcheck)) return -EFAULT; } #if IS_ENABLED(CONFIG_IPV6) /* For IPv6, checksum each skb as we go along, as expected by * icmpv6_push_pending_frames. For IPv4, accumulate the checksum in * wcheck, it will be finalized in ping_v4_push_pending_frames. */ if (pfh->family == AF_INET6) { skb->csum = pfh->wcheck; skb->ip_summed = CHECKSUM_NONE; pfh->wcheck = 0; } #endif return 0; } EXPORT_SYMBOL_GPL(ping_getfrag); static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh, struct flowi4 *fl4) { struct sk_buff *skb = skb_peek(&sk->sk_write_queue); pfh->wcheck = csum_partial((char *)&pfh->icmph, sizeof(struct icmphdr), pfh->wcheck); pfh->icmph.checksum = csum_fold(pfh->wcheck); memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr)); skb->ip_summed = CHECKSUM_NONE; return ip_push_pending_frames(sk, fl4); } int ping_common_sendmsg(int family, struct msghdr *msg, size_t len, void *user_icmph, size_t icmph_len) { u8 type, code; if (len > 0xFFFF) return -EMSGSIZE; /* * Check the flags. */ /* Mirror BSD error message compatibility */ if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* * Fetch the ICMP header provided by the userland. * iovec is modified! The ICMP header is consumed. 
*/ if (memcpy_fromiovec(user_icmph, msg->msg_iov, icmph_len)) return -EFAULT; if (family == AF_INET) { type = ((struct icmphdr *) user_icmph)->type; code = ((struct icmphdr *) user_icmph)->code; #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { type = ((struct icmp6hdr *) user_icmph)->icmp6_type; code = ((struct icmp6hdr *) user_icmph)->icmp6_code; #endif } else { BUG(); } if (!ping_supported(family, type, code)) return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(ping_common_sendmsg); int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct net *net = sock_net(sk); struct flowi4 fl4; struct inet_sock *inet = inet_sk(sk); struct ipcm_cookie ipc; struct icmphdr user_icmph; struct pingfakehdr pfh; struct rtable *rt = NULL; struct ip_options_data opt_copy; int free = 0; __be32 saddr, daddr, faddr; u8 tos; int err; pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); err = ping_common_sendmsg(AF_INET, msg, len, &user_icmph, sizeof(user_icmph)); if (err) return err; /* * Get and verify the address. 
*/ if (msg->msg_name) { struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) return -EINVAL; daddr = usin->sin_addr.s_addr; /* no remote port */ } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = inet->inet_daddr; /* no remote port */ } ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.oif = sk->sk_bound_dev_if; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; sock_tx_timestamp(sk, &ipc.tx_flags); if (msg->msg_controllen) { err = ip_cmsg_send(sock_net(sk), msg, &ipc); if (err) return err; if (ipc.opt) free = 1; } if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } saddr = ipc.addr; ipc.addr = faddr = daddr; if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) return -EINVAL; faddr = ipc.opt->opt.faddr; } tos = get_rttos(&ipc, inet); if (sock_flag(sk, SOCK_LOCALROUTE) || (msg->msg_flags & MSG_DONTROUTE) || (ipc.opt && ipc.opt->opt.is_strictroute)) { tos |= RTO_ONLINK; } if (ipv4_is_multicast(daddr)) { if (!ipc.oif) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; } else if (!ipc.oif) ipc.oif = inet->uc_index; flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), faddr, saddr, 0, 0); security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); rt = ip_route_output_flow(net, &fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; if (err == -ENETUNREACH) IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); goto out; } err = -EACCES; if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) goto out; if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; back_from_confirm: if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); pfh.icmph.type = user_icmph.type; /* already checked */ pfh.icmph.code = 
user_icmph.code; /* ditto */ pfh.icmph.checksum = 0; pfh.icmph.un.echo.id = inet->inet_sport; pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence; pfh.iov = msg->msg_iov; pfh.wcheck = 0; pfh.family = AF_INET; err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len, 0, &ipc, &rt, msg->msg_flags); if (err) ip_flush_pending_frames(sk); else err = ping_v4_push_pending_frames(sk, &pfh, &fl4); release_sock(sk); out: ip_rt_put(rt); if (free) kfree(ipc.opt); if (!err) { icmp_out_count(sock_net(sk), user_icmph.type); return len; } return err; do_confirm: dst_confirm(&rt->dst); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto out; } int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *isk = inet_sk(sk); int family = sk->sk_family; struct sk_buff *skb; int copied, err; pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num); err = -EOPNOTSUPP; if (flags & MSG_OOB) goto out; if (flags & MSG_ERRQUEUE) { if (family == AF_INET) { return ip_recv_error(sk, msg, len); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { return pingv6_ops.ipv6_recv_error(sk, msg, len); #endif } } skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) goto out; copied = skb->len; if (copied > len) { msg->msg_flags |= MSG_TRUNC; copied = len; } /* Don't bother checking the checksum */ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address and add cmsg data. 
*/ if (family == AF_INET) { struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; sin->sin_family = AF_INET; sin->sin_port = 0 /* skb->h.uh->source */; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); if (isk->cmsg_flags) ip_cmsg_recv(msg, skb); #if IS_ENABLED(CONFIG_IPV6) } else if (family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6hdr *ip6 = ipv6_hdr(skb); struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name; sin6->sin6_family = AF_INET6; sin6->sin6_port = 0; sin6->sin6_addr = ip6->saddr; sin6->sin6_flowinfo = 0; if (np->sndflow) sin6->sin6_flowinfo = ip6_flowinfo(ip6); sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, IP6CB(skb)->iif); *addr_len = sizeof(*sin6); if (inet6_sk(sk)->rxopt.all) pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb); #endif } else { BUG(); } err = copied; done: skb_free_datagram(sk, skb); out: pr_debug("ping_recvmsg -> %d\n", err); return err; } EXPORT_SYMBOL_GPL(ping_recvmsg); int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", inet_sk(sk), inet_sk(sk)->inet_num, skb); if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); pr_debug("ping_queue_rcv_skb -> failed\n"); return -1; } return 0; } EXPORT_SYMBOL_GPL(ping_queue_rcv_skb); /* * All we need to do is get the socket. 
 */
void ping_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct net *net = dev_net(skb->dev);
	struct icmphdr *icmph = icmp_hdr(skb);

	/* We assume the packet has already been checked by icmp_rcv */

	pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n", skb,
		 ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));

	/* Push ICMP header back */
	skb_push(skb, skb->data - (u8 *)icmph);

	/* Deliver to the socket bound to this echo id, if any. */
	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
	if (sk != NULL) {
		pr_debug("rcv on socket %p\n", sk);
		/* skb_get(): the queueing path consumes its own reference. */
		ping_queue_rcv_skb(sk, skb_get(skb));
		sock_put(sk);
		return;
	}
	pr_debug("no socket, dropping\n");

	/* We're called from icmp_rcv(). kfree_skb() is done there. */
}
EXPORT_SYMBOL_GPL(ping_rcv);

/* Protocol hooks wiring the IPv4 ping socket type into the inet layer. */
struct proto ping_prot = {
	.name =		"PING",
	.owner =	THIS_MODULE,
	.init =		ping_init_sock,
	.close =	ping_close,
	.connect =	ip4_datagram_connect,
	.disconnect =	udp_disconnect,
	.setsockopt =	ip_setsockopt,
	.getsockopt =	ip_getsockopt,
	.sendmsg =	ping_v4_sendmsg,
	.recvmsg =	ping_recvmsg,
	.bind =		ping_bind,
	.backlog_rcv =	ping_queue_rcv_skb,
	.release_cb =	ip4_datagram_release_cb,
	.hash =		ping_hash,
	.unhash =	ping_unhash,
	.get_port =	ping_get_port,
	.obj_size =	sizeof(struct inet_sock),
};
EXPORT_SYMBOL(ping_prot);

#ifdef CONFIG_PROC_FS

/*
 * /proc seq_file iterator: return the first socket of the iterator's
 * family in this netns, scanning hash buckets from @start upward.
 * Updates state->bucket as a side effect so the walk can resume.
 */
static struct sock *ping_get_first(struct seq_file *seq, int start)
{
	struct sock *sk;
	struct ping_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;
	     ++state->bucket) {
		struct hlist_nulls_node *node;
		struct hlist_nulls_head *hslot;

		hslot = &ping_table.hash[state->bucket];

		if (hlist_nulls_empty(hslot))
			continue;

		sk_nulls_for_each(sk, node, hslot) {
			if (net_eq(sock_net(sk), net) &&
			    sk->sk_family == state->family)
				goto found;
		}
	}
	sk = NULL;
found:
	return sk;
}

/*
 * Advance to the next socket after @sk in the same netns, moving on to
 * the next hash bucket when the current chain is exhausted.
 */
static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk)
{
	struct ping_iter_state *state = seq->private;
	struct net *net = seq_file_net(seq);

	do {
		sk = sk_nulls_next(sk);
	} while (sk && (!net_eq(sock_net(sk), net)));

	if (!sk)
	return ping_get_first(seq, state->bucket + 1);
	return sk;
}

/* Return the @pos'th matching socket, or NULL if fewer exist. */
static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
{
	struct sock *sk = ping_get_first(seq, 0);

	if (sk)
		while (pos && (sk = ping_get_next(seq, sk)) != NULL)
			--pos;
	return pos ? NULL : sk;
}

/*
 * seq_file ->start: takes ping_table.lock (released in ping_seq_stop)
 * and positions the iterator; *pos == 0 yields the header token.
 */
void *ping_seq_start(struct seq_file *seq, loff_t *pos, sa_family_t family)
{
	struct ping_iter_state *state = seq->private;

	state->bucket = 0;
	state->family = family;

	read_lock_bh(&ping_table.lock);

	return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL_GPL(ping_seq_start);

static void *ping_v4_seq_start(struct seq_file *seq, loff_t *pos)
{
	return ping_seq_start(seq, pos, AF_INET);
}

/* seq_file ->next: step the iterator and bump *pos. */
void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN)
		sk = ping_get_idx(seq, 0);
	else
		sk = ping_get_next(seq, v);

	++*pos;
	return sk;
}
EXPORT_SYMBOL_GPL(ping_seq_next);

/* seq_file ->stop: drop the lock taken in ping_seq_start(). */
void ping_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock_bh(&ping_table.lock);
}
EXPORT_SYMBOL_GPL(ping_seq_stop);

/*
 * Format one socket as a /proc/net/icmp row; *len receives the number
 * of characters written (via the trailing %n) so the caller can pad.
 */
static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
		int bucket, int *len)
{
	struct inet_sock *inet = inet_sk(sp);
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);

	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
		bucket, src, srcp, dest, destp, sp->sk_state,
		sk_wmem_alloc_get(sp),
		sk_rmem_alloc_get(sp),
		0, 0L, 0,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
		0, sock_i_ino(sp),
		atomic_read(&sp->sk_refcnt), sp,
		atomic_read(&sp->sk_drops), len);
}

/* seq_file ->show: header line for SEQ_START_TOKEN, one row otherwise. */
static int ping_v4_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   " sl local_address rem_address st tx_queue "
			   "rx_queue tr tm->when retrnsmt uid timeout "
			   "inode ref pointer drops");
	else {
		struct ping_iter_state *state = seq->private;
		int len;

		ping_v4_format_sock(v, seq, state->bucket, &len);
		seq_printf(seq,
"%*s\n", 127 - len, ""); } return 0; } static const struct seq_operations ping_v4_seq_ops = { .show = ping_v4_seq_show, .start = ping_v4_seq_start, .next = ping_seq_next, .stop = ping_seq_stop, }; static int ping_seq_open(struct inode *inode, struct file *file) { struct ping_seq_afinfo *afinfo = PDE_DATA(inode); return seq_open_net(inode, file, &afinfo->seq_ops, sizeof(struct ping_iter_state)); } const struct file_operations ping_seq_fops = { .open = ping_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; EXPORT_SYMBOL_GPL(ping_seq_fops); static struct ping_seq_afinfo ping_v4_seq_afinfo = { .name = "icmp", .family = AF_INET, .seq_fops = &ping_seq_fops, .seq_ops = { .start = ping_v4_seq_start, .show = ping_v4_seq_show, .next = ping_seq_next, .stop = ping_seq_stop, }, }; int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo) { struct proc_dir_entry *p; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, afinfo->seq_fops, afinfo); if (!p) return -ENOMEM; return 0; } EXPORT_SYMBOL_GPL(ping_proc_register); void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo) { remove_proc_entry(afinfo->name, net->proc_net); } EXPORT_SYMBOL_GPL(ping_proc_unregister); static int __net_init ping_v4_proc_init_net(struct net *net) { return ping_proc_register(net, &ping_v4_seq_afinfo); } static void __net_exit ping_v4_proc_exit_net(struct net *net) { ping_proc_unregister(net, &ping_v4_seq_afinfo); } static struct pernet_operations ping_v4_net_ops = { .init = ping_v4_proc_init_net, .exit = ping_v4_proc_exit_net, }; int __init ping_proc_init(void) { return register_pernet_subsys(&ping_v4_net_ops); } void ping_proc_exit(void) { unregister_pernet_subsys(&ping_v4_net_ops); } #endif void __init ping_init(void) { int i; for (i = 0; i < PING_HTABLE_SIZE; i++) INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i); rwlock_init(&ping_table.lock); }
./CrossVul/dataset_final_sorted/CWE-20/c/good_5844_1